CombinedText stringlengths 4 3.42M |
|---|
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package view
import (
"fmt"
"html/template"
"log"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/google/shenzhen-go/graph"
)
// TODO: Replace these cobbled-together UIs with Polymer or something.
// channelEditorTemplateSrc is the HTML editor page for a single channel,
// rendered with a *graph.Channel as the template data. The Capacity field
// uses an <input type="number"> so browsers enforce numeric entry; the
// pattern attribute remains as a fallback constraint.
const channelEditorTemplateSrc = `<head>
<title>{{if .Name}}{{.Name}}{{else}}[New]{{end}}</title><style>` + css + `</style>
</head>
<body>
<h1>{{if .Name}}{{.Name}}{{else}}[New]{{end}}</h1>
{{if .Name}}
<a href="?channel={{.Name}}&clone">Clone</a> |
<a href="?channel={{.Name}}&delete">Delete</a>
{{end}}
<form method="post">
<div class="formfield">
<label for="Name">Name</label>
<input type="text" name="Name" required pattern="^[_a-zA-Z][_a-zA-Z0-9]*$" title="Must start with a letter or underscore, and only contain letters, digits, or underscores." value="{{.Name}}">
</div>
<div class="formfield">
<label for="Type">Type</label>
<input type="text" name="Type" required value="{{.Type}}">
</div>
<div class="formfield">
<label for="Cap">Capacity</label>
<input type="number" name="Cap" required pattern="^[0-9]+$" title="Must be a whole number, at least 0." value="{{.Cap}}">
</div>
<div class="formfield hcentre">
<input type="submit" value="Save">
<input type="button" value="Return" onclick="window.location.href='?'">
</div>
</form>
</body>`
var (
	// channelEditorTemplate renders channelEditorTemplateSrc; Must panics
	// at package init if the template fails to parse.
	channelEditorTemplate = template.Must(template.New("channelEditor").Parse(channelEditorTemplateSrc))
	// identifierRE accepts valid Go identifiers: letters, digits, and
	// underscores, not starting with a digit.
	identifierRE = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`)
)
// Channel handles viewing/editing a channel.
func Channel(g *graph.Graph, name string, w http.ResponseWriter, r *http.Request) {
log.Printf("%s %s", r.Method, r.URL)
q := r.URL.Query()
_, clone := q["clone"]
_, del := q["delete"]
var e *graph.Channel
if name == "new" {
if clone || del {
http.Error(w, "Asked for a new channel, but also to clone or delete the channel", http.StatusBadRequest)
return
}
e = new(graph.Channel)
} else {
e1, ok := g.Channels[name]
if !ok {
http.Error(w, fmt.Sprintf("Channel %q not found", name), http.StatusNotFound)
return
}
e = e1
}
switch {
case clone:
e2 := *e
e2.Name = ""
e = &e2
case del:
delete(g.Channels, e.Name)
u := *r.URL
u.RawQuery = ""
log.Printf("redirecting to %v", &u)
http.Redirect(w, r, u.String(), http.StatusSeeOther) // should cause GET
return
}
var err error
switch r.Method {
case "POST":
err = handleChannelPost(g, e, w, r)
case "GET":
err = channelEditorTemplate.Execute(w, e)
default:
err = fmt.Errorf("unsupported verb %q", r.Method)
}
if err != nil {
log.Printf("Could not handle request: %v", err)
http.Error(w, "Could not handle request", http.StatusInternalServerError)
}
}
// handleChannelPost applies a channel-editor form submission to e.
// It validates the Name and Cap fields, updates e in place, then either
// re-renders the editor (no rename) or moves the channel under its new
// name in g and redirects to the channel's new URL.
func handleChannelPost(g *graph.Graph, e *graph.Channel, w http.ResponseWriter, r *http.Request) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	// Validate.
	nn := r.FormValue("Name")
	if !identifierRE.MatchString(nn) {
		return fmt.Errorf("invalid name [%q !~ %q]", nn, identifierRE)
	}
	ci, err := strconv.Atoi(r.FormValue("Cap"))
	if err != nil {
		return err
	}
	if ci < 0 {
		return fmt.Errorf("invalid capacity [%d < 0]", ci)
	}
	// Update.
	e.Type = r.FormValue("Type")
	e.Cap = ci
	// No name change? No need to readjust the map or redirect.
	// So render the usual editor.
	if nn == e.Name {
		return channelEditorTemplate.Execute(w, e)
	}
	// Do name changes last since they cause a redirect.
	if e.Name != "" {
		delete(g.Channels, e.Name)
	}
	e.Name = nn
	g.Channels[nn] = e
	q := url.Values{
		"channel": []string{nn},
	}
	u := *r.URL
	u.RawQuery = q.Encode()
	// Log via *url.URL so URL.String (pointer receiver) is used; passing
	// the value would print the struct's raw fields.
	log.Printf("redirecting to %v", &u)
	http.Redirect(w, r, u.String(), http.StatusSeeOther) // should cause GET
	return nil
}
Change the channel Capacity input to type="number"
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package view
import (
"fmt"
"html/template"
"log"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/google/shenzhen-go/graph"
)
// TODO: Replace these cobbled-together UIs with Polymer or something.
const channelEditorTemplateSrc = `<head>
<title>{{if .Name}}{{.Name}}{{else}}[New]{{end}}</title><style>` + css + `</style>
</head>
<body>
<h1>{{if .Name}}{{.Name}}{{else}}[New]{{end}}</h1>
{{if .Name}}
<a href="?channel={{.Name}}&clone">Clone</a> |
<a href="?channel={{.Name}}&delete">Delete</a>
{{end}}
<form method="post">
<div class="formfield">
<label for="Name">Name</label>
<input type="text" name="Name" required pattern="^[_a-zA-Z][_a-zA-Z0-9]*$" title="Must start with a letter or underscore, and only contain letters, digits, or underscores." value="{{.Name}}">
</div>
<div class="formfield">
<label for="Type">Type</label>
<input type="text" name="Type" required value="{{.Type}}">
</div>
<div class="formfield">
<label for="Cap">Capacity</label>
<input type="number" name="Cap" required pattern="^[0-9]+$" title="Must be a whole number, at least 0." value="{{.Cap}}">
</div>
<div class="formfield hcentre">
<input type="submit" value="Save">
<input type="button" value="Return" onclick="window.location.href='?'">
</div>
</form>
</body>`
var (
channelEditorTemplate = template.Must(template.New("channelEditor").Parse(channelEditorTemplateSrc))
identifierRE = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`)
)
// Channel handles viewing/editing a channel.
// Channel handles viewing/editing a channel.
// name is either "new" (create a blank channel) or the key of an existing
// channel in g. The presence of the "clone" or "delete" query parameters
// selects those actions; their values are ignored.
func Channel(g *graph.Graph, name string, w http.ResponseWriter, r *http.Request) {
	log.Printf("%s %s", r.Method, r.URL)
	q := r.URL.Query()
	// Only key presence matters, not the parameter values.
	_, clone := q["clone"]
	_, del := q["delete"]
	var e *graph.Channel
	if name == "new" {
		if clone || del {
			http.Error(w, "Asked for a new channel, but also to clone or delete the channel", http.StatusBadRequest)
			return
		}
		e = new(graph.Channel)
	} else {
		e1, ok := g.Channels[name]
		if !ok {
			http.Error(w, fmt.Sprintf("Channel %q not found", name), http.StatusNotFound)
			return
		}
		e = e1
	}
	switch {
	case clone:
		// Work on a copy with the name cleared; saving it later inserts a
		// new map entry instead of overwriting the original.
		e2 := *e
		e2.Name = ""
		e = &e2
	case del:
		delete(g.Channels, e.Name)
		u := *r.URL
		u.RawQuery = ""
		log.Printf("redirecting to %v", &u)
		http.Redirect(w, r, u.String(), http.StatusSeeOther) // should cause GET
		return
	}
	var err error
	switch r.Method {
	case "POST":
		err = handleChannelPost(g, e, w, r)
	case "GET":
		err = channelEditorTemplate.Execute(w, e)
	default:
		err = fmt.Errorf("unsupported verb %q", r.Method)
	}
	if err != nil {
		log.Printf("Could not handle request: %v", err)
		http.Error(w, "Could not handle request", http.StatusInternalServerError)
	}
}
// handleChannelPost applies a channel-editor form submission to e.
// It validates the Name and Cap fields, updates e in place, then either
// re-renders the editor (no rename) or moves the channel under its new
// name in g and redirects to the channel's new URL.
func handleChannelPost(g *graph.Graph, e *graph.Channel, w http.ResponseWriter, r *http.Request) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	// Validate.
	nn := r.FormValue("Name")
	if !identifierRE.MatchString(nn) {
		return fmt.Errorf("invalid name [%q !~ %q]", nn, identifierRE)
	}
	ci, err := strconv.Atoi(r.FormValue("Cap"))
	if err != nil {
		return err
	}
	if ci < 0 {
		return fmt.Errorf("invalid capacity [%d < 0]", ci)
	}
	// Update.
	e.Type = r.FormValue("Type")
	e.Cap = ci
	// No name change? No need to readjust the map or redirect.
	// So render the usual editor.
	if nn == e.Name {
		return channelEditorTemplate.Execute(w, e)
	}
	// Do name changes last since they cause a redirect.
	if e.Name != "" {
		delete(g.Channels, e.Name)
	}
	e.Name = nn
	g.Channels[nn] = e
	q := url.Values{
		"channel": []string{nn},
	}
	u := *r.URL
	u.RawQuery = q.Encode()
	// Log via *url.URL so URL.String (pointer receiver) is used; passing
	// the value would print the struct's raw fields.
	log.Printf("redirecting to %v", &u)
	http.Redirect(w, r, u.String(), http.StatusSeeOther) // should cause GET
	return nil
}
|
package editor
import (
"fmt"
"strings"
"unicode"
"github.com/elpinal/coco3/editor/register"
)
// searchRange holds [start, end) index pairs of search matches in the buffer.
type searchRange [][2]int

// editor is a line editor: a basic buffer plus named registers, an undo
// tree, line history, and the state of the most recent search.
type editor struct {
	basic
	register.Registers
	undoTree
	history [][]rune // previously entered lines
	age     int      // position within history
	sp      string   // search pattern
	sr      searchRange // match spans for sp
}
// newEditor returns an editor with initialized registers, a fresh undo
// tree, and room preallocated for search results.
func newEditor() *editor {
	r := register.Registers{}
	r.Init()
	return &editor{
		undoTree:  newUndoTree(),
		Registers: r,
		sr:        make([][2]int, 2),
	}
}
// yank copies the runes in [from, to) into register r.
func (e *editor) yank(r rune, from, to int) {
	s := e.slice(from, to)
	e.Register(r, s)
}
// put inserts the contents of register r into the buffer at position at.
func (e *editor) put(r rune, at int) {
	s := e.Read(r)
	e.insert(s, at)
}
// isKeyword reports whether ch counts as part of a word: ASCII letters,
// digits, underscore, or code points 192-255 (Latin-1 letters).
func isKeyword(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	case '0' <= ch && ch <= '9':
		return true
	case ch == '_':
		return true
	case 192 <= ch && ch <= 255:
		return true
	}
	return false
}
// isWhitespace reports whether ch is a blank: a space or a tab.
// Newlines are deliberately not blanks here; the editor works on a
// single line.
func isWhitespace(ch rune) bool {
	return ch == ' ' || ch == '\t'
}
// wordForward moves the cursor to the start of the next word (the vi
// "w" motion). A word is a run of keyword runes or a run of other
// non-blank runes; whitespace separates words.
func (e *editor) wordForward() {
	switch n := len(e.buf) - e.pos; {
	case n < 1:
		// Already at or past the end.
		return
	case n == 1:
		e.pos = len(e.buf)
		return
	}
	switch ch := e.buf[e.pos]; {
	case isWhitespace(ch):
		// On a blank: jump to the next non-blank rune.
		if i := e.indexFunc(isWhitespace, e.pos+1, false); i > 0 {
			e.pos = i
			return
		}
	case isKeyword(ch):
		// On a keyword rune: skip the rest of this keyword run, then any
		// whitespace that follows it.
		if i := e.indexFunc(isKeyword, e.pos+1, false); i > 0 {
			if !isWhitespace(e.buf[i]) {
				e.pos = i
				return
			}
			if i := e.indexFunc(isWhitespace, i+1, false); i > 0 {
				e.pos = i
				return
			}
		}
	default:
		// On punctuation: advance to the next blank or keyword rune, then
		// past any whitespace.
		if i := e.indexFunc(func(r rune) bool { return isWhitespace(r) || isKeyword(r) }, e.pos+1, true); i > 0 {
			if isKeyword(e.buf[i]) {
				e.pos = i
				return
			}
			if i := e.indexFunc(isWhitespace, i+1, false); i > 0 {
				e.pos = i
				return
			}
		}
	}
	// No further word start: park at end of buffer.
	e.pos = len(e.buf)
}
// wordBackward moves the cursor to the start of the previous word (the
// vi "b" motion).
func (e *editor) wordBackward() {
	switch e.pos {
	case 0:
		return
	case 1:
		e.pos = 0
		return
	}
	n := e.pos - 1
	switch ch := e.buf[n]; {
	case isWhitespace(ch):
		// Skip the blanks immediately before the cursor.
		n = e.lastIndexFunc(isWhitespace, n, false)
		if n < 0 {
			e.pos = 0
			return
		}
	}
	switch ch := e.buf[n]; {
	case isKeyword(ch):
		// Move to the first rune of this keyword run.
		if i := e.lastIndexFunc(isKeyword, n, false); i >= 0 {
			e.pos = i + 1
			return
		}
	default:
		// Punctuation run: scan back to just after the first keyword or
		// blank rune.
		for i := n - 1; i >= 0; i-- {
			switch ch := e.buf[i]; {
			case isKeyword(ch), isWhitespace(ch):
				e.pos = i + 1
				return
			}
		}
	}
	e.pos = 0
}
// wordForwardNonBlank moves the cursor to the start of the next
// whitespace-delimited WORD (the vi "W" motion).
func (e *editor) wordForwardNonBlank() {
	// Find the next blank at or after the cursor...
	i := e.indexFunc(isWhitespace, e.pos, true)
	if i < 0 {
		e.pos = len(e.buf)
		return
	}
	// ...then the first non-blank after it.
	i = e.indexFunc(isWhitespace, i+1, false)
	if i < 0 {
		e.pos = len(e.buf)
		return
	}
	e.pos = i
}
// wordBackwardNonBlank moves the cursor to the start of the previous
// whitespace-delimited WORD (the vi "B" motion).
func (e *editor) wordBackwardNonBlank() {
	// Step back over non-blanks, then over the separating blanks.
	i := e.lastIndexFunc(isWhitespace, e.pos, false)
	if i < 0 {
		e.pos = 0
		return
	}
	i = e.lastIndexFunc(isWhitespace, i, true)
	if i < 0 {
		e.pos = 0
		return
	}
	e.pos = i + 1
}
// wordEnd moves the cursor to the last rune of the next word (the vi
// "e" motion).
func (e *editor) wordEnd() {
	switch n := len(e.buf) - e.pos; {
	case n < 1:
		return
	case n == 1:
		e.pos = len(e.buf)
		return
	}
	// Step off the current rune so a cursor already on a word end moves
	// to the end of the following word.
	e.pos++
	switch ch := e.buf[e.pos]; {
	case isWhitespace(ch):
		// Skip blanks, then run to the end of whichever run (keyword or
		// punctuation) follows them.
		if i := e.indexFunc(isWhitespace, e.pos+1, false); i > 0 {
			switch ch := e.buf[i]; {
			case isKeyword(ch):
				if i := e.indexFunc(isKeyword, i+1, false); i > 0 {
					e.pos = i - 1
					return
				}
			default:
				if i := e.indexFunc(func(r rune) bool { return !isWhitespace(r) && !isKeyword(r) }, i+1, false); i > 0 {
					e.pos = i - 1
					return
				}
			}
		}
	case isKeyword(ch):
		if i := e.indexFunc(isKeyword, e.pos+1, false); i > 0 {
			e.pos = i - 1
			return
		}
	default:
		if i := e.indexFunc(func(r rune) bool { return !isWhitespace(r) && !isKeyword(r) }, e.pos+1, false); i > 0 {
			e.pos = i - 1
			return
		}
	}
	e.pos = len(e.buf)
}
// wordEndNonBlank moves the cursor to the last rune of the next
// whitespace-delimited WORD (the vi "E" motion).
func (e *editor) wordEndNonBlank() {
	switch n := len(e.buf) - e.pos; {
	case n < 1:
		return
	case n == 1:
		e.pos = len(e.buf)
		return
	}
	// Step off the current rune so a cursor already on a WORD end moves
	// to the end of the following WORD.
	e.pos++
	switch ch := e.buf[e.pos]; {
	case isWhitespace(ch):
		// Skip the blanks, then run to the last rune before the next blank.
		if i := e.indexFunc(isWhitespace, e.pos+1, false); i > 0 {
			if i := e.indexFunc(isWhitespace, i+1, true); i > 0 {
				e.pos = i - 1
				return
			}
		}
	default:
		if i := e.indexFunc(isWhitespace, e.pos+1, true); i > 0 {
			e.pos = i - 1
			return
		}
	}
	e.pos = len(e.buf)
}
// toUpper upper-cases the runes in [from, to), writing the result back
// at the span's lower bound (clamped to the buffer).
func (e *editor) toUpper(from, to int) {
	at := constrain(min(from, to), 0, len(e.buf))
	e.replace([]rune(strings.ToUpper(string(e.slice(from, to)))), at)
}
// toLower lower-cases the runes in [from, to), writing the result back
// at the span's lower bound (clamped to the buffer).
func (e *editor) toLower(from, to int) {
	at := constrain(min(from, to), 0, len(e.buf))
	e.replace([]rune(strings.ToLower(string(e.slice(from, to)))), at)
}
// swapCase flips the case of every letter in xs in place: lower becomes
// upper, upper becomes lower. Caseless runes are left untouched.
func swapCase(xs []rune) {
	for i := range xs {
		switch r := xs[i]; {
		case unicode.IsLower(r):
			xs[i] = unicode.ToUpper(r)
		case unicode.IsUpper(r):
			xs[i] = unicode.ToLower(r)
		}
	}
}
// swapCase toggles the case of each letter in [from, to) in place,
// writing the result back at the span's lower bound (clamped).
func (e *editor) swapCase(from, to int) {
	at := constrain(min(from, to), 0, len(e.buf))
	xs := e.slice(from, to)
	swapCase(xs)
	e.replace(xs, at)
}
// currentWord returns the [from, to) span of the word under the cursor
// (the vi "iw"/"aw" text objects). With include set, one adjacent blank
// is folded into the span (trailing preferred, else leading). Returns
// (-1, -1) when the cursor is past the last rune.
func (e *editor) currentWord(include bool) (from, to int) {
	if len(e.buf) == 0 {
		return 0, 0
	}
	if e.pos == len(e.buf) {
		return -1, -1
	}
	// Classify the rune under the cursor; the word is the maximal run of
	// runes in the same class (blank / keyword / other).
	f := func(r rune) bool { return !(isKeyword(r) || isWhitespace(r)) }
	switch ch := e.buf[e.pos]; {
	case isWhitespace(ch):
		f = isWhitespace
	case isKeyword(ch):
		f = isKeyword
	}
	from = e.lastIndexFunc(f, e.pos, false) + 1
	to = e.indexFunc(f, e.pos, false)
	if to < 0 {
		to = len(e.buf)
	}
	if include && to < len(e.buf) && isWhitespace(e.buf[to]) {
		to++
		return
	}
	if include && from > 0 && isWhitespace(e.buf[from-1]) {
		from--
		return
	}
	return
}
// currentWordNonBlank returns the [from, to) span of the
// whitespace-delimited WORD under the cursor (the vi "iW"/"aW" text
// objects). With include set, one adjacent blank is folded in (trailing
// preferred, else leading). Returns (-1, -1) when the cursor is past
// the last rune.
func (e *editor) currentWordNonBlank(include bool) (from, to int) {
	if len(e.buf) == 0 {
		return 0, 0
	}
	if e.pos == len(e.buf) {
		return -1, -1
	}
	// The span is either a run of non-blanks or (if the cursor sits on a
	// blank) a run of blanks.
	f := func(r rune) bool { return !isWhitespace(r) }
	if isWhitespace(e.buf[e.pos]) {
		f = isWhitespace
	}
	from = e.lastIndexFunc(f, e.pos, false) + 1
	to = e.indexFunc(f, e.pos, false)
	if to < 0 {
		to = len(e.buf)
	}
	if include && to < len(e.buf) && isWhitespace(e.buf[to]) {
		to++
		return
	}
	if include && from > 0 && isWhitespace(e.buf[from-1]) {
		from--
		return
	}
	return
}
// currentQuote returns the [from, to) span of the quoted string around
// the cursor delimited by quote (the vi `i"`/`a"` text objects). With
// include set, the delimiters plus one adjacent blank are part of the
// span; otherwise only the interior. A negative position means no
// enclosing pair was found.
func (e *editor) currentQuote(include bool, quote rune) (from, to int) {
	if len(e.buf) == 0 {
		return
	}
	// The cursor may rest one position past the last rune; indexing
	// e.buf there would panic, so bail out explicitly.
	if e.pos == len(e.buf) {
		return -1, -1
	}
	if e.buf[e.pos] == quote {
		// Count preceding quotes to learn whether the cursor sits on an
		// opening (even count) or closing (odd count) delimiter.
		n := strings.Count(string(e.buf[:e.pos]), string(quote))
		if n%2 == 0 {
			// expect `to` as the position of the even-numbered quote
			to = e.index(quote, e.pos+1)
			from = e.pos
		} else {
			// expect `to` as the position of the odd-numbered quote
			from = e.lastIndex(quote, e.pos)
			to = e.pos
		}
	} else {
		from = e.lastIndex(quote, e.pos)
		if from < 0 {
			return
		}
		to = e.index(quote, e.pos)
	}
	if to < 0 {
		return
	}
	if include {
		to++
		if to < len(e.buf) && isWhitespace(e.buf[to]) {
			to++
			return
		}
		if from > 0 && isWhitespace(e.buf[from-1]) {
			from--
		}
		return
	}
	from++
	return
}
// currentParen returns the [from, to) span delimited by the pair
// p1...p2 around the cursor (vi "i("/"a(" style text objects). With
// include set, the delimiters plus one adjacent blank are part of the
// span; otherwise only the interior. Negative positions mean no
// enclosing pair was found.
// NOTE(review): matching is by nearest single delimiter, not balanced
// nesting — nested pairs may yield an unexpected span.
func (e *editor) currentParen(include bool, p1, p2 rune) (from, to int) {
	if len(e.buf) == 0 {
		return
	}
	if e.pos == len(e.buf) {
		return -1, -1
	}
	switch e.buf[e.pos] {
	case p1:
		// On the opener: closer must be somewhere after.
		to = e.index(p2, e.pos+1)
		from = e.pos
	case p2:
		// On the closer: opener must be somewhere before.
		from = e.lastIndex(p1, e.pos)
		to = e.pos
	default:
		from = e.lastIndex(p1, e.pos)
		if from < 0 {
			return
		}
		to = e.index(p2, e.pos)
	}
	if to < 0 {
		return
	}
	if include {
		to++
		if to < len(e.buf) && isWhitespace(e.buf[to]) {
			to++
			return
		}
		if from > 0 && isWhitespace(e.buf[from-1]) {
			from--
		}
		return
	}
	from++
	return
}
// charSearch returns the position of the first occurrence of r strictly
// after the cursor (the vi "f" motion), or an error when r does not
// appear. Scanning runes directly keeps the result a rune index; the
// previous strings.IndexRune on string(runes) returned a byte offset,
// which drifted for non-ASCII buffers.
func (e *editor) charSearch(r rune) (int, error) {
	for i, c := range e.slice(e.pos+1, len(e.buf)) {
		if c == r {
			return e.pos + 1 + i, nil
		}
	}
	return 0, fmt.Errorf("pattern not found: %c", r)
}
// charSearchBackward returns the position of the last occurrence of r
// before the cursor (the vi "F" motion), or an error when r does not
// appear. Scanning runes directly keeps the result a rune index; the
// previous strings.LastIndex on string(runes) returned a byte offset,
// which drifted for non-ASCII buffers.
func (e *editor) charSearchBackward(r rune) (int, error) {
	s := e.slice(0, e.pos)
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == r {
			return i, nil
		}
	}
	return 0, fmt.Errorf("pattern not found: %c", r)
}
// undo restores the previous buffer snapshot from the undo tree, if
// any, and resets the cursor via move(0).
func (e *editor) undo() {
	s, ok := e.undoTree.undo()
	if !ok {
		return
	}
	// Copy so later edits cannot mutate the snapshot held by the tree.
	e.buf = make([]rune, len(s))
	copy(e.buf, s)
	e.move(0)
}
// redo re-applies the next buffer snapshot from the undo tree, if any,
// and resets the cursor via move(0).
func (e *editor) redo() {
	s, ok := e.undoTree.redo()
	if !ok {
		return
	}
	// Copy so later edits cannot mutate the snapshot held by the tree.
	e.buf = make([]rune, len(s))
	copy(e.buf, s)
	e.move(0)
}
// overwrite lays cover on top of base starting at index at (clamped to
// base's bounds) and returns the combined slice. Any tail of base that
// extends past the covered region is preserved.
func (e *editor) overwrite(base []rune, cover []rune, at int) []rune {
	start := constrain(at, 0, len(base))
	end := start + len(cover)
	out := make([]rune, max(len(base), end))
	copy(out[:start], base)
	copy(out[start:], cover)
	if end < len(base) {
		copy(out[end:], base[end:])
	}
	return out
}
// search records s as the current pattern and collects every
// non-overlapping match in the buffer into e.sr as [start, end) pairs,
// reporting whether at least one match was found.
// NOTE(review): strings.Index returns a byte offset, which is stored as
// an index into the []rune buffer — positions drift for non-ASCII
// content; confirm and fix if multibyte input is expected.
func (e *editor) search(s string) (found bool) {
	e.sp = s
	e.sr = e.sr[:0]
	if s == "" {
		return false
	}
	off := 0
	for {
		i := strings.Index(string(e.buf[off:]), s)
		if i < 0 {
			return len(e.sr) > 0
		}
		e.sr = append(e.sr, [2]int{off + i, off + i + len(s)})
		off += i + len(s)
	}
}
// next re-runs the current search and returns the start of the first
// match after the cursor, wrapping around to the first match overall.
// It returns the unchanged cursor position when nothing matches.
func (e *editor) next() int {
	found := e.search(e.sp)
	if !found {
		return e.pos
	}
	for _, sr := range e.sr {
		i := sr[0]
		if i > e.pos {
			return i
		}
	}
	// Wrap to the first match.
	return e.sr[0][0]
}
// previous re-runs the current search and returns the start of the
// nearest match before the cursor, wrapping around to the last match.
// It returns the unchanged cursor position when nothing matches.
func (e *editor) previous() int {
	found := e.search(e.sp)
	if !found {
		return e.pos
	}
	for n := len(e.sr) - 1; 0 <= n; n-- {
		i := e.sr[n][0]
		if e.pos > i {
			return i
		}
	}
	// Wrap to the last match.
	return e.sr[len(e.sr)-1][0]
}
Avoid panic when cursor is at the end of buffer
package editor
import (
"fmt"
"strings"
"unicode"
"github.com/elpinal/coco3/editor/register"
)
type searchRange [][2]int
type editor struct {
basic
register.Registers
undoTree
history [][]rune
age int
sp string // search pattern
sr searchRange
}
func newEditor() *editor {
r := register.Registers{}
r.Init()
return &editor{
undoTree: newUndoTree(),
Registers: r,
sr: make([][2]int, 2),
}
}
func (e *editor) yank(r rune, from, to int) {
s := e.slice(from, to)
e.Register(r, s)
}
func (e *editor) put(r rune, at int) {
s := e.Read(r)
e.insert(s, at)
}
func isKeyword(ch rune) bool {
if 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || '0' <= ch && ch <= '9' || ch == '_' || 192 <= ch && ch <= 255 {
return true
}
return false
}
func isWhitespace(ch rune) bool {
if ch == ' ' || ch == '\t' {
return true
}
return false
}
func (e *editor) wordForward() {
switch n := len(e.buf) - e.pos; {
case n < 1:
return
case n == 1:
e.pos = len(e.buf)
return
}
switch ch := e.buf[e.pos]; {
case isWhitespace(ch):
if i := e.indexFunc(isWhitespace, e.pos+1, false); i > 0 {
e.pos = i
return
}
case isKeyword(ch):
if i := e.indexFunc(isKeyword, e.pos+1, false); i > 0 {
if !isWhitespace(e.buf[i]) {
e.pos = i
return
}
if i := e.indexFunc(isWhitespace, i+1, false); i > 0 {
e.pos = i
return
}
}
default:
if i := e.indexFunc(func(r rune) bool { return isWhitespace(r) || isKeyword(r) }, e.pos+1, true); i > 0 {
if isKeyword(e.buf[i]) {
e.pos = i
return
}
if i := e.indexFunc(isWhitespace, i+1, false); i > 0 {
e.pos = i
return
}
}
}
e.pos = len(e.buf)
}
func (e *editor) wordBackward() {
switch e.pos {
case 0:
return
case 1:
e.pos = 0
return
}
n := e.pos - 1
switch ch := e.buf[n]; {
case isWhitespace(ch):
n = e.lastIndexFunc(isWhitespace, n, false)
if n < 0 {
e.pos = 0
return
}
}
switch ch := e.buf[n]; {
case isKeyword(ch):
if i := e.lastIndexFunc(isKeyword, n, false); i >= 0 {
e.pos = i + 1
return
}
default:
for i := n - 1; i >= 0; i-- {
switch ch := e.buf[i]; {
case isKeyword(ch), isWhitespace(ch):
e.pos = i + 1
return
}
}
}
e.pos = 0
}
func (e *editor) wordForwardNonBlank() {
i := e.indexFunc(isWhitespace, e.pos, true)
if i < 0 {
e.pos = len(e.buf)
return
}
i = e.indexFunc(isWhitespace, i+1, false)
if i < 0 {
e.pos = len(e.buf)
return
}
e.pos = i
}
func (e *editor) wordBackwardNonBlank() {
i := e.lastIndexFunc(isWhitespace, e.pos, false)
if i < 0 {
e.pos = 0
return
}
i = e.lastIndexFunc(isWhitespace, i, true)
if i < 0 {
e.pos = 0
return
}
e.pos = i + 1
}
func (e *editor) wordEnd() {
switch n := len(e.buf) - e.pos; {
case n < 1:
return
case n == 1:
e.pos = len(e.buf)
return
}
e.pos++
switch ch := e.buf[e.pos]; {
case isWhitespace(ch):
if i := e.indexFunc(isWhitespace, e.pos+1, false); i > 0 {
switch ch := e.buf[i]; {
case isKeyword(ch):
if i := e.indexFunc(isKeyword, i+1, false); i > 0 {
e.pos = i - 1
return
}
default:
if i := e.indexFunc(func(r rune) bool { return !isWhitespace(r) && !isKeyword(r) }, i+1, false); i > 0 {
e.pos = i - 1
return
}
}
}
case isKeyword(ch):
if i := e.indexFunc(isKeyword, e.pos+1, false); i > 0 {
e.pos = i - 1
return
}
default:
if i := e.indexFunc(func(r rune) bool { return !isWhitespace(r) && !isKeyword(r) }, e.pos+1, false); i > 0 {
e.pos = i - 1
return
}
}
e.pos = len(e.buf)
}
func (e *editor) wordEndNonBlank() {
switch n := len(e.buf) - e.pos; {
case n < 1:
return
case n == 1:
e.pos = len(e.buf)
return
}
e.pos++
switch ch := e.buf[e.pos]; {
case isWhitespace(ch):
if i := e.indexFunc(isWhitespace, e.pos+1, false); i > 0 {
if i := e.indexFunc(isWhitespace, i+1, true); i > 0 {
e.pos = i - 1
return
}
}
default:
if i := e.indexFunc(isWhitespace, e.pos+1, true); i > 0 {
e.pos = i - 1
return
}
}
e.pos = len(e.buf)
}
func (e *editor) toUpper(from, to int) {
at := constrain(min(from, to), 0, len(e.buf))
e.replace([]rune(strings.ToUpper(string(e.slice(from, to)))), at)
}
func (e *editor) toLower(from, to int) {
at := constrain(min(from, to), 0, len(e.buf))
e.replace([]rune(strings.ToLower(string(e.slice(from, to)))), at)
}
func swapCase(xs []rune) {
for i, r := range xs {
if unicode.IsLower(r) {
xs[i] = unicode.ToUpper(r)
} else if unicode.IsUpper(r) {
xs[i] = unicode.ToLower(r)
}
}
}
func (e *editor) swapCase(from, to int) {
at := constrain(min(from, to), 0, len(e.buf))
xs := e.slice(from, to)
swapCase(xs)
e.replace(xs, at)
}
func (e *editor) currentWord(include bool) (from, to int) {
if len(e.buf) == 0 {
return 0, 0
}
if e.pos == len(e.buf) {
return -1, -1
}
f := func(r rune) bool { return !(isKeyword(r) || isWhitespace(r)) }
switch ch := e.buf[e.pos]; {
case isWhitespace(ch):
f = isWhitespace
case isKeyword(ch):
f = isKeyword
}
from = e.lastIndexFunc(f, e.pos, false) + 1
to = e.indexFunc(f, e.pos, false)
if to < 0 {
to = len(e.buf)
}
if include && to < len(e.buf) && isWhitespace(e.buf[to]) {
to++
return
}
if include && from > 0 && isWhitespace(e.buf[from-1]) {
from--
return
}
return
}
func (e *editor) currentWordNonBlank(include bool) (from, to int) {
if len(e.buf) == 0 {
return 0, 0
}
if e.pos == len(e.buf) {
return -1, -1
}
f := func(r rune) bool { return !isWhitespace(r) }
if isWhitespace(e.buf[e.pos]) {
f = isWhitespace
}
from = e.lastIndexFunc(f, e.pos, false) + 1
to = e.indexFunc(f, e.pos, false)
if to < 0 {
to = len(e.buf)
}
if include && to < len(e.buf) && isWhitespace(e.buf[to]) {
to++
return
}
if include && from > 0 && isWhitespace(e.buf[from-1]) {
from--
return
}
return
}
// currentQuote returns the [from, to) span of the quoted string around
// the cursor delimited by quote (the vi `i"`/`a"` text objects). With
// include set, the delimiters plus one adjacent blank are part of the
// span; otherwise only the interior. A negative position means no
// enclosing pair was found.
func (e *editor) currentQuote(include bool, quote rune) (from, to int) {
	if len(e.buf) == 0 {
		return
	}
	// The cursor may rest one position past the last rune; indexing
	// e.buf there would panic.
	if e.pos == len(e.buf) {
		return -1, -1
	}
	if e.buf[e.pos] == quote {
		// Count preceding quotes to learn whether the cursor sits on an
		// opening (even count) or closing (odd count) delimiter.
		n := strings.Count(string(e.buf[:e.pos]), string(quote))
		if n%2 == 0 {
			// expect `to` as the position of the even-numbered quote
			to = e.index(quote, e.pos+1)
			from = e.pos
		} else {
			// expect `to` as the position of the odd-numbered quote
			from = e.lastIndex(quote, e.pos)
			to = e.pos
		}
	} else {
		from = e.lastIndex(quote, e.pos)
		if from < 0 {
			return
		}
		to = e.index(quote, e.pos)
	}
	if to < 0 {
		return
	}
	if include {
		to++
		if to < len(e.buf) && isWhitespace(e.buf[to]) {
			to++
			return
		}
		if from > 0 && isWhitespace(e.buf[from-1]) {
			from--
		}
		return
	}
	from++
	return
}
func (e *editor) currentParen(include bool, p1, p2 rune) (from, to int) {
if len(e.buf) == 0 {
return
}
if e.pos == len(e.buf) {
return -1, -1
}
switch e.buf[e.pos] {
case p1:
to = e.index(p2, e.pos+1)
from = e.pos
case p2:
from = e.lastIndex(p1, e.pos)
to = e.pos
default:
from = e.lastIndex(p1, e.pos)
if from < 0 {
return
}
to = e.index(p2, e.pos)
}
if to < 0 {
return
}
if include {
to++
if to < len(e.buf) && isWhitespace(e.buf[to]) {
to++
return
}
if from > 0 && isWhitespace(e.buf[from-1]) {
from--
}
return
}
from++
return
}
// charSearch returns the position of the first occurrence of r strictly
// after the cursor (the vi "f" motion), or an error when r does not
// appear. Scanning runes directly keeps the result a rune index; the
// previous strings.IndexRune on string(runes) returned a byte offset,
// which drifted for non-ASCII buffers.
func (e *editor) charSearch(r rune) (int, error) {
	for i, c := range e.slice(e.pos+1, len(e.buf)) {
		if c == r {
			return e.pos + 1 + i, nil
		}
	}
	return 0, fmt.Errorf("pattern not found: %c", r)
}
// charSearchBackward returns the position of the last occurrence of r
// before the cursor (the vi "F" motion), or an error when r does not
// appear. Scanning runes directly keeps the result a rune index; the
// previous strings.LastIndex on string(runes) returned a byte offset,
// which drifted for non-ASCII buffers.
func (e *editor) charSearchBackward(r rune) (int, error) {
	s := e.slice(0, e.pos)
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == r {
			return i, nil
		}
	}
	return 0, fmt.Errorf("pattern not found: %c", r)
}
func (e *editor) undo() {
s, ok := e.undoTree.undo()
if !ok {
return
}
e.buf = make([]rune, len(s))
copy(e.buf, s)
e.move(0)
}
func (e *editor) redo() {
s, ok := e.undoTree.redo()
if !ok {
return
}
e.buf = make([]rune, len(s))
copy(e.buf, s)
e.move(0)
}
func (e *editor) overwrite(base []rune, cover []rune, at int) []rune {
n := constrain(at, 0, len(base))
s := make([]rune, max(len(base), n+len(cover)))
copy(s[:n], base)
copy(s[n:], cover)
if n+len(cover) < len(base) {
copy(s[n+len(cover):], base[n+len(cover):])
}
return s
}
func (e *editor) search(s string) (found bool) {
e.sp = s
e.sr = e.sr[:0]
if s == "" {
return false
}
off := 0
for {
i := strings.Index(string(e.buf[off:]), s)
if i < 0 {
return len(e.sr) > 0
}
e.sr = append(e.sr, [2]int{off + i, off + i + len(s)})
off += i + len(s)
}
}
func (e *editor) next() int {
found := e.search(e.sp)
if !found {
return e.pos
}
for _, sr := range e.sr {
i := sr[0]
if i > e.pos {
return i
}
}
return e.sr[0][0]
}
func (e *editor) previous() int {
found := e.search(e.sp)
if !found {
return e.pos
}
for n := len(e.sr) - 1; 0 <= n; n-- {
i := e.sr[n][0]
if e.pos > i {
return i
}
}
return e.sr[len(e.sr)-1][0]
}
|
package levant
import (
"fmt"
"os"
"strings"
"time"
nomad "github.com/hashicorp/nomad/api"
nomadStructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/jrasell/levant/client"
"github.com/jrasell/levant/levant/structs"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// levantDeployment is the all deployment related objects for this Levant
// deployment invocation.
// levantDeployment is the all deployment related objects for this Levant
// deployment invocation.
type levantDeployment struct {
	nomad  *nomad.Client // client used for every Nomad API call
	config *DeployConfig // resolved configuration for this invocation
}

// DeployConfig is the set of config structs required to run a Levant deploy.
type DeployConfig struct {
	Deploy   *structs.DeployConfig
	Client   *structs.ClientConfig
	Plan     *structs.PlanConfig
	Template *structs.TemplateConfig
}
// newLevantDeployment sets up the Levant deployment object and Nomad client
// to interact with the Nomad API.
// newLevantDeployment sets up the Levant deployment object and Nomad client
// to interact with the Nomad API. When nomadClient is nil, a fresh client
// is built from the configured address.
func newLevantDeployment(config *DeployConfig, nomadClient *nomad.Client) (*levantDeployment, error) {
	var err error

	// Pull the Vault token from the environment when requested.
	// (Idiomatic boolean check; `== true` was redundant.)
	if config.Deploy.EnvVault {
		config.Deploy.VaultToken = os.Getenv("VAULT_TOKEN")
	}

	dep := &levantDeployment{}
	dep.config = config

	if nomadClient == nil {
		dep.nomad, err = client.NewNomadClient(config.Client.Addr)
		if err != nil {
			return nil, err
		}
	} else {
		dep.nomad = nomadClient
	}

	// Add the JobID as a log context field.
	log.Logger = log.With().Str(structs.JobIDContextField, *config.Template.Job.ID).Logger()

	return dep, nil
}
// TriggerDeployment provides the main entry point into a Levant deployment and
// is used to setup the clients before triggering the deployment process.
// TriggerDeployment provides the main entry point into a Levant deployment and
// is used to setup the clients before triggering the deployment process.
// It returns true only when both pre-deployment validation and the
// deployment itself succeed; all failures are logged.
func TriggerDeployment(config *DeployConfig, nomadClient *nomad.Client) bool {

	// Create our new deployment object.
	levantDep, err := newLevantDeployment(config, nomadClient)
	if err != nil {
		log.Error().Err(err).Msg("levant/deploy: unable to setup Levant deployment")
		return false
	}

	// Run the job validation steps and count updater.
	preDepVal := levantDep.preDeployValidate()
	if !preDepVal {
		log.Error().Msg("levant/deploy: pre-deployment validation process failed")
		return false
	}

	// Start the main deployment function.
	success := levantDep.deploy()
	if !success {
		log.Error().Msg("levant/deploy: job deployment failed")
		return false
	}

	log.Info().Msg("levant/deploy: job deployment successful")
	return true
}
// preDeployValidate runs Nomad's server-side job validation, checks that
// the job type is set, and — unless ForceCount is set — runs the dynamic
// group count updater. It returns false (the zero value) on any failure.
func (l *levantDeployment) preDeployValidate() (success bool) {

	// Validate the job to check it is syntactically correct.
	if _, _, err := l.nomad.Jobs().Validate(l.config.Template.Job, nil); err != nil {
		log.Error().Err(err).Msg("levant/deploy: job validation failed")
		return
	}

	// If job.Type isn't set we can't continue
	if l.config.Template.Job.Type == nil {
		log.Error().Msgf("levant/deploy: Nomad job `type` is not set; should be set to `%s`, `%s` or `%s`",
			nomadStructs.JobTypeBatch, nomadStructs.JobTypeSystem, nomadStructs.JobTypeService)
		return
	}

	if !l.config.Deploy.ForceCount {
		if err := l.dynamicGroupCountUpdater(); err != nil {
			return
		}
	}

	return true
}
// deploy triggers a register of the job resulting in a Nomad deployment which
// is monitored to determine the eventual state.
// deploy triggers a register of the job resulting in a Nomad deployment which
// is monitored to determine the eventual state.
// Service jobs with an update stanza are tracked through the deployments
// API (with auto-revert checking); batch/system jobs fall back to the job
// status checker; other job types are registered and assumed successful.
func (l *levantDeployment) deploy() (success bool) {

	log.Info().Msgf("levant/deploy: triggering a deployment")

	l.config.Template.Job.VaultToken = &l.config.Deploy.VaultToken

	eval, _, err := l.nomad.Jobs().Register(l.config.Template.Job, nil)
	if err != nil {
		log.Error().Err(err).Msg("levant/deploy: unable to register job with Nomad")
		return
	}

	if l.config.Deploy.ForceBatch {
		if eval.EvalID, err = l.triggerPeriodic(l.config.Template.Job.ID); err != nil {
			log.Error().Err(err).Msg("levant/deploy: unable to trigger periodic instance of job")
			return
		}
	}

	// Periodic and parameterized jobs do not return an evaluation and therefore
	// can't perform the evaluationInspector unless we are forcing an instance of
	// periodic which will yield an EvalID.
	if !l.config.Template.Job.IsPeriodic() && !l.config.Template.Job.IsParameterized() ||
		l.config.Template.Job.IsPeriodic() && l.config.Deploy.ForceBatch {

		// Trigger the evaluationInspector to identify any potential errors in the
		// Nomad evaluation run. As far as I can tell from testing; a single alloc
		// failure in an evaluation means no allocs will be placed so we exit here.
		err = l.evaluationInspector(&eval.EvalID)
		if err != nil {
			log.Error().Err(err).Msg("levant/deploy: something")
			return
		}
	}

	// Jobs scaled to zero have nothing to watch.
	if l.isJobZeroCount() {
		return true
	}

	switch *l.config.Template.Job.Type {
	case nomadStructs.JobTypeService:

		// If the service job doesn't have an update stanza, the job will not use
		// Nomad deployments.
		if l.config.Template.Job.Update == nil {
			log.Info().Msg("levant/deploy: job is not configured with update stanza, consider adding to use deployments")
			return l.jobStatusChecker(&eval.EvalID)
		}

		log.Info().Msgf("levant/deploy: beginning deployment watcher for job")

		// Get the deploymentID from the evaluationID so that we can watch the
		// deployment for end status.
		depID, err := l.getDeploymentID(eval.EvalID)
		if err != nil {
			log.Error().Err(err).Msgf("levant/deploy: unable to get info of evaluation %s", eval.EvalID)
			return
		}

		// Get the success of the deployment and return if we have success.
		if success = l.deploymentWatcher(depID); success {
			return
		}

		dep, _, err := l.nomad.Deployments().Info(depID, nil)
		if err != nil {
			log.Error().Err(err).Msgf("levant/deploy: unable to query deployment %s for auto-revert check", dep.ID)
			return
		}

		// If the job is not a canary job, then run the auto-revert checker, the
		// current checking mechanism is slightly hacky and should be updated.
		// The reason for this is currently the config.Job is populate from the
		// rendered job and so a user could potentially not set canary meaning
		// the field shows a null.
		if l.config.Template.Job.Update.Canary == nil {
			l.checkAutoRevert(dep)
		} else if *l.config.Template.Job.Update.Canary == 0 {
			l.checkAutoRevert(dep)
		}

	case nomadStructs.JobTypeBatch:
		return l.jobStatusChecker(&eval.EvalID)

	case nomadStructs.JobTypeSystem:
		return l.jobStatusChecker(&eval.EvalID)

	default:
		log.Debug().Msgf("levant/deploy: Levant does not support advanced deployments of job type %s",
			*l.config.Template.Job.Type)
		success = true
	}
	return
}
// evaluationInspector polls the evaluation identified by evalID until it
// reaches a terminal status, then logs details of any failed task-group
// allocations (exhausted nodes, filtered classes/constraints). It returns
// an error only when the Nomad API call itself fails.
func (l *levantDeployment) evaluationInspector(evalID *string) error {

	for {
		evalInfo, _, err := l.nomad.Evaluations().Info(*evalID, nil)
		if err != nil {
			return err
		}

		switch evalInfo.Status {
		case nomadStructs.EvalStatusComplete, nomadStructs.EvalStatusFailed, nomadStructs.EvalStatusCancelled:
			if len(evalInfo.FailedTGAllocs) == 0 {
				log.Info().Msgf("levant/deploy: evaluation %s finished successfully", *evalID)
				return nil
			}

			for group, metrics := range evalInfo.FailedTGAllocs {

				// Check if any nodes have been exhausted of resources and therefore are
				// unable to place allocs.
				if metrics.NodesExhausted > 0 {
					var exhausted, dimension []string
					for e := range metrics.ClassExhausted {
						exhausted = append(exhausted, e)
					}
					for d := range metrics.DimensionExhausted {
						dimension = append(dimension, d)
					}
					log.Error().Msgf("levant/deploy: task group %s failed to place allocs, failed on %v and exhausted %v",
						group, exhausted, dimension)
				}

				// Check if any node classes were filtered causing alloc placement
				// failures.
				if len(metrics.ClassFiltered) > 0 {
					for f := range metrics.ClassFiltered {
						log.Error().Msgf("levant/deploy: task group %s failed to place %v allocs as class \"%s\" was filtered",
							group, len(metrics.ClassFiltered), f)
					}
				}

				// Check if any node constraints were filtered causing alloc placement
				// failures.
				if len(metrics.ConstraintFiltered) > 0 {
					for cf := range metrics.ConstraintFiltered {
						log.Error().Msgf("levant/deploy: task group %s failed to place %v allocs as constraint \"%s\" was filtered",
							group, len(metrics.ConstraintFiltered), cf)
					}
				}
			}

			// Do not return an error here; there could well be information from
			// Nomad detailing filtered nodes but the deployment will still be
			// successful. GH-220.
			return nil

		default:
			// Evaluation still in progress; poll again shortly.
			time.Sleep(1 * time.Second)
			continue
		}
	}
}
// deploymentWatcher polls Nomad for the status of deployment depID using
// blocking queries and returns true only once the deployment completes
// successfully. If canary auto-promotion is enabled, it launches the
// canaryAutoPromote goroutine wired to this watcher via two channels:
// canaryChan lets the status checker shut the promoter down, and
// deploymentChan lets the promoter tell this watcher to abort.
func (l *levantDeployment) deploymentWatcher(depID string) (success bool) {

	var canaryChan chan interface{}
	deploymentChan := make(chan interface{})

	t := time.Now()
	wt := time.Duration(5 * time.Second)

	// Setup the canaryChan and launch the autoPromote go routine if autoPromote
	// has been enabled.
	if l.config.Deploy.Canary > 0 {
		canaryChan = make(chan interface{})
		go l.canaryAutoPromote(depID, l.config.Deploy.Canary, canaryChan, deploymentChan)
	}

	q := &nomad.QueryOptions{WaitIndex: 1, AllowStale: l.config.Client.AllowStale, WaitTime: wt}

	for {
		// Blocking query: returns after wt or when the deployment changes.
		dep, meta, err := l.nomad.Deployments().Info(depID, q)
		log.Debug().Msgf("levant/deploy: deployment %v running for %.2fs", depID, time.Since(t).Seconds())

		// Listen for the deploymentChan closing which indicates Levant should exit
		// the deployment watcher. Checked before the error so a promoter-initiated
		// abort always wins.
		select {
		case <-deploymentChan:
			return false
		default:
			break
		}

		if err != nil {
			log.Error().Err(err).Msgf("levant/deploy: unable to get info of deployment %s", depID)
			return
		}

		// Stale blocking-query response; nothing changed, poll again.
		if meta.LastIndex <= q.WaitIndex {
			continue
		}

		q.WaitIndex = meta.LastIndex

		// cont is true while the deployment is still running; a non-nil error
		// means the deployment reached a failure state.
		cont, err := l.checkDeploymentStatus(dep, canaryChan)
		if err != nil {
			return false
		}

		if cont {
			continue
		} else {
			return true
		}
	}
}
// checkDeploymentStatus inspects a Nomad deployment's status and reports
// whether the watcher should keep polling. It returns (true, nil) while the
// deployment is running, (false, nil) on success, and (false, error) for any
// other status, in which case the failure inspector is launched and, when a
// canary auto-promote routine is active, it is stopped via shutdownChan.
func (l *levantDeployment) checkDeploymentStatus(dep *nomad.Deployment, shutdownChan chan interface{}) (bool, error) {

	// Still in progress; tell the watcher to continue.
	if dep.Status == nomadStructs.DeploymentStatusRunning {
		return true, nil
	}

	if dep.Status == nomadStructs.DeploymentStatusSuccessful {
		log.Info().Msgf("levant/deploy: deployment %v has completed successfully", dep.ID)
		return false, nil
	}

	// Every remaining status is treated as a failure.
	if shutdownChan != nil {
		log.Debug().Msgf("levant/deploy: deployment %v meaning canary auto promote will shutdown", dep.Status)
		close(shutdownChan)
	}
	log.Error().Msgf("levant/deploy: deployment %v has status %s", dep.ID, dep.Status)

	// Launch the failure inspector.
	l.checkFailedDeployment(&dep.ID)
	return false, fmt.Errorf("deployment failed")
}
// canaryAutoPromote handles Levant's canary-auto-promote functionality.
// It waits waitTime seconds, verifies every canary task group in the
// deployment is healthy and, if so, promotes the deployment. A health or
// promote failure closes deploymentChan so the deployment watcher aborts;
// a close of shutdownChan (by the status checker) stops this goroutine.
func (l *levantDeployment) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) {

	// Setup the AutoPromote timer.
	autoPromote := time.After(time.Duration(waitTime) * time.Second)

	for {
		select {
		case <-autoPromote:
			log.Info().Msgf("levant/deploy: auto-promote period %vs has been reached for deployment %s",
				waitTime, depID)

			// Check the deployment is healthy before promoting.
			if healthy := l.checkCanaryDeploymentHealth(depID); !healthy {
				log.Error().Msgf("levant/deploy: the canary deployment %s has unhealthy allocations, unable to promote", depID)
				close(deploymentChan)
				return
			}

			log.Info().Msgf("levant/deploy: triggering auto promote of deployment %s", depID)

			// Promote the deployment.
			_, _, err := l.nomad.Deployments().PromoteAll(depID, nil)
			if err != nil {
				log.Error().Err(err).Msgf("levant/deploy: unable to promote deployment %s", depID)
				close(deploymentChan)
				return
			}
			// time.After is one-shot, so after a successful promote the loop
			// simply blocks here until shutdownChan is closed.

		case <-shutdownChan:
			log.Info().Msg("levant/deploy: canary auto promote has been shutdown")
			return
		}
	}
}
// checkCanaryDeploymentHealth reports whether every task group that requested
// canaries in the given deployment currently has all of its canary
// allocations healthy. Task groups with no desired canaries are skipped.
func (l *levantDeployment) checkCanaryDeploymentHealth(depID string) bool {

	dep, _, err := l.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: l.config.Client.AllowStale})
	if err != nil {
		log.Error().Err(err).Msgf("levant/deploy: unable to query deployment %s for health", depID)
		return false
	}

	// Count task groups whose healthy allocation count has not yet reached
	// the number of desired canaries.
	unhealthy := 0
	for taskName, taskInfo := range dep.TaskGroups {
		// skip any task groups which are not configured for canary deployments
		if taskInfo.DesiredCanaries == 0 {
			log.Debug().Msgf("levant/deploy: task %s has no desired canaries, skipping health checks in deployment %s", taskName, depID)
			continue
		}
		if taskInfo.HealthyAllocs != taskInfo.DesiredCanaries {
			log.Error().Msgf("levant/deploy: task %s has unhealthy allocations in deployment %s", taskName, depID)
			unhealthy++
		}
	}

	if unhealthy > 0 {
		return false
	}

	log.Debug().Msgf("levant/deploy: deployment %s has 0 unhealthy allocations", depID)
	return true
}
// triggerPeriodic forces an instance of a periodic job outside of its planned
// schedule. The resulting evaluation ID can then be checked in the same
// fashion as other jobs.
func (l *levantDeployment) triggerPeriodic(jobID *string) (string, error) {

	log.Info().Msg("levant/deploy: triggering a run of periodic job")

	// Force the run and hand both the evaluation ID and any error straight
	// back; the caller performs the error checking.
	evalID, _, err := l.nomad.Jobs().PeriodicForce(*jobID, nil)
	return evalID, err
}
// getDeploymentID finds the Nomad deploymentID associated to a Nomad
// evaluationID. This is only needed as sometimes Nomad initially returns eval
// info with an empty deploymentID; and a retry is required in order to get the
// updated response from Nomad.
func (l *levantDeployment) getDeploymentID(evalID string) (depID string, err error) {

	var evalInfo *nomad.Evaluation

	// A single deadline wants a one-shot Timer, not a Ticker: the previous
	// Ticker would have fired repeatedly for no benefit.
	timeout := time.NewTimer(time.Second * 60)
	defer timeout.Stop()

	for {
		select {
		case <-timeout.C:
			err = errors.New("timeout reached on attempting to find deployment ID")
			return
		default:
			if evalInfo, _, err = l.nomad.Evaluations().Info(evalID, nil); err != nil {
				return
			}

			if evalInfo.DeploymentID != "" {
				return evalInfo.DeploymentID, nil
			}

			// Nomad has not associated a deployment yet; back off briefly
			// before asking again.
			log.Debug().Msgf("levant/deploy: Nomad returned an empty deployment for evaluation %v; retrying", evalID)
			time.Sleep(2 * time.Second)
		}
	}
}
// dynamicGroupCountUpdater takes the templated and rendered job and updates the
// group counts based on the currently deployed job; if its running. This lets
// externally scaled counts survive a redeploy instead of being reset to the
// template values.
func (l *levantDeployment) dynamicGroupCountUpdater() error {

	// Gather information about the current state, if any, of the job on the
	// Nomad cluster.
	rJob, _, err := l.nomad.Jobs().Info(*l.config.Template.Job.Name, &nomad.QueryOptions{})

	// This is a hack due to GH-1849; we check the error string for 404 which
	// indicates the job is not running, not that there was an error in the API
	// call.
	if err != nil && strings.Contains(err.Error(), "404") {
		log.Info().Msg("levant/deploy: job is not running, using template file group counts")
		return nil
	} else if err != nil {
		log.Error().Err(err).Msg("levant/deploy: unable to perform job evaluation")
		return err
	}

	// Check that the job is actually running and not in a potentially stopped
	// state.
	if *rJob.Status != nomadStructs.JobStatusRunning {
		return nil
	}

	log.Debug().Msgf("levant/deploy: running dynamic job count updater")

	// Iterate the templated job and the Nomad returned job and update group count
	// based on matches. Matching is by group name; the template group is a
	// pointer, so assigning Count mutates the job that will be registered.
	for _, rGroup := range rJob.TaskGroups {
		for _, group := range l.config.Template.Job.TaskGroups {
			if *rGroup.Name == *group.Name {
				log.Info().Msgf("levant/deploy: using dynamic count %v for group %s",
					*rGroup.Count, *group.Name)
				group.Count = rGroup.Count
			}
		}
	}
	return nil
}
// isJobZeroCount reports whether no task group in the rendered job asks for a
// positive number of allocations. A nil count on any group means the job is
// not considered zero-count.
func (l *levantDeployment) isJobZeroCount() bool {
	for _, tg := range l.config.Template.Job.TaskGroups {
		// Either an unset count or a positive count disqualifies the job.
		if tg.Count == nil || *tg.Count > 0 {
			return false
		}
	}
	return true
}
Fix segfault when accessing `dep`, which is nil when the deployment info request returns an error.
Log `depID` instead, which is already defined and in scope at that point.
package levant
import (
"fmt"
"os"
"strings"
"time"
nomad "github.com/hashicorp/nomad/api"
nomadStructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/jrasell/levant/client"
"github.com/jrasell/levant/levant/structs"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// levantDeployment is the all deployment related objects for this Levant
// deployment invocation.
type levantDeployment struct {
	// nomad is the API client used for all calls to the Nomad cluster.
	nomad *nomad.Client
	// config carries the deploy, client, plan and template configuration.
	config *DeployConfig
}
// DeployConfig is the set of config structs required to run a Levant deploy.
type DeployConfig struct {
	// Deploy holds deployment behaviour options (canary, force-count, etc.).
	Deploy *structs.DeployConfig
	// Client holds Nomad client connection options.
	Client *structs.ClientConfig
	// Plan holds plan-related options.
	Plan *structs.PlanConfig
	// Template holds the rendered job template used for this deployment.
	Template *structs.TemplateConfig
}
// newLevantDeployment sets up the Levant deployment object and Nomad client
// to interact with the Nomad API. A non-nil nomadClient is used as-is
// (useful for tests); otherwise a client is built from the configured
// address. It returns an error only when client construction fails.
func newLevantDeployment(config *DeployConfig, nomadClient *nomad.Client) (*levantDeployment, error) {

	// Source the Vault token from the environment when requested.
	// (Idiom fix: compare booleans directly rather than against `true`.)
	if config.Deploy.EnvVault {
		config.Deploy.VaultToken = os.Getenv("VAULT_TOKEN")
	}

	dep := &levantDeployment{config: config}

	if nomadClient == nil {
		var err error
		dep.nomad, err = client.NewNomadClient(config.Client.Addr)
		if err != nil {
			return nil, err
		}
	} else {
		dep.nomad = nomadClient
	}

	// Add the JobID as a log context field.
	log.Logger = log.With().Str(structs.JobIDContextField, *config.Template.Job.ID).Logger()

	return dep, nil
}
// TriggerDeployment provides the main entry point into a Levant deployment and
// is used to setup the clients before triggering the deployment process.
// It returns true only when validation and the deployment both succeed.
func TriggerDeployment(config *DeployConfig, nomadClient *nomad.Client) bool {

	// Create our new deployment object.
	levantDep, err := newLevantDeployment(config, nomadClient)
	if err != nil {
		log.Error().Err(err).Msg("levant/deploy: unable to setup Levant deployment")
		return false
	}

	// Run the job validation steps and count updater.
	if !levantDep.preDeployValidate() {
		log.Error().Msg("levant/deploy: pre-deployment validation process failed")
		return false
	}

	// Start the main deployment function.
	if !levantDep.deploy() {
		log.Error().Msg("levant/deploy: job deployment failed")
		return false
	}

	log.Info().Msg("levant/deploy: job deployment successful")
	return true
}
// preDeployValidate runs every check Levant performs before registering the
// job: Nomad syntax validation, a job-type presence check and, unless counts
// are forced, the dynamic group count updater.
func (l *levantDeployment) preDeployValidate() bool {

	// Validate the job to check it is syntactically correct.
	if _, _, err := l.nomad.Jobs().Validate(l.config.Template.Job, nil); err != nil {
		log.Error().Err(err).Msg("levant/deploy: job validation failed")
		return false
	}

	// If job.Type isn't set we can't continue
	if l.config.Template.Job.Type == nil {
		log.Error().Msgf("levant/deploy: Nomad job `type` is not set; should be set to `%s`, `%s` or `%s`",
			nomadStructs.JobTypeBatch, nomadStructs.JobTypeSystem, nomadStructs.JobTypeService)
		return false
	}

	// Pull live group counts from the cluster unless the user forced the
	// template counts.
	if !l.config.Deploy.ForceCount {
		if err := l.dynamicGroupCountUpdater(); err != nil {
			return false
		}
	}

	return true
}
// deploy triggers a register of the job resulting in a Nomad deployment which
// is monitored to determine the eventual state.
//
// Batch and system jobs are handed to the job status checker; service jobs
// with an update stanza are tracked through the Nomad deployments API, with
// an auto-revert check when the deployment watcher reports failure.
func (l *levantDeployment) deploy() (success bool) {

	log.Info().Msgf("levant/deploy: triggering a deployment")

	l.config.Template.Job.VaultToken = &l.config.Deploy.VaultToken

	eval, _, err := l.nomad.Jobs().Register(l.config.Template.Job, nil)
	if err != nil {
		log.Error().Err(err).Msg("levant/deploy: unable to register job with Nomad")
		return
	}

	// Forcing a batch run yields an EvalID even for periodic jobs.
	if l.config.Deploy.ForceBatch {
		if eval.EvalID, err = l.triggerPeriodic(l.config.Template.Job.ID); err != nil {
			log.Error().Err(err).Msg("levant/deploy: unable to trigger periodic instance of job")
			return
		}
	}

	// Periodic and parameterized jobs do not return an evaluation and therefore
	// can't perform the evaluationInspector unless we are forcing an instance of
	// periodic which will yield an EvalID.
	if !l.config.Template.Job.IsPeriodic() && !l.config.Template.Job.IsParameterized() ||
		l.config.Template.Job.IsPeriodic() && l.config.Deploy.ForceBatch {

		// Trigger the evaluationInspector to identify any potential errors in the
		// Nomad evaluation run. As far as I can tell from testing; a single alloc
		// failure in an evaluation means no allocs will be placed so we exit here.
		err = l.evaluationInspector(&eval.EvalID)
		if err != nil {
			// Bug fix: the previous log message was the placeholder "something".
			log.Error().Err(err).Msg("levant/deploy: evaluation inspection failed")
			return
		}
	}

	// A job whose groups all have zero count has nothing to watch.
	if l.isJobZeroCount() {
		return true
	}

	switch *l.config.Template.Job.Type {
	case nomadStructs.JobTypeService:

		// If the service job doesn't have an update stanza, the job will not use
		// Nomad deployments.
		if l.config.Template.Job.Update == nil {
			log.Info().Msg("levant/deploy: job is not configured with update stanza, consider adding to use deployments")
			return l.jobStatusChecker(&eval.EvalID)
		}

		log.Info().Msgf("levant/deploy: beginning deployment watcher for job")

		// Get the deploymentID from the evaluationID so that we can watch the
		// deployment for end status.
		depID, err := l.getDeploymentID(eval.EvalID)
		if err != nil {
			log.Error().Err(err).Msgf("levant/deploy: unable to get info of evaluation %s", eval.EvalID)
			return
		}

		// Get the success of the deployment and return if we have success.
		if success = l.deploymentWatcher(depID); success {
			return
		}

		// The deployment failed; fetch its final state for the auto-revert check.
		dep, _, err := l.nomad.Deployments().Info(depID, nil)
		if err != nil {
			log.Error().Err(err).Msgf("levant/deploy: unable to query deployment %s for auto-revert check", depID)
			return
		}

		// If the job is not a canary job, then run the auto-revert checker, the
		// current checking mechanism is slightly hacky and should be updated.
		// The reason for this is currently the config.Job is populate from the
		// rendered job and so a user could potentially not set canary meaning
		// the field shows a null.
		if l.config.Template.Job.Update.Canary == nil {
			l.checkAutoRevert(dep)
		} else if *l.config.Template.Job.Update.Canary == 0 {
			l.checkAutoRevert(dep)
		}

	case nomadStructs.JobTypeBatch:
		return l.jobStatusChecker(&eval.EvalID)

	case nomadStructs.JobTypeSystem:
		return l.jobStatusChecker(&eval.EvalID)

	default:
		log.Debug().Msgf("levant/deploy: Levant does not support advanced deployments of job type %s",
			*l.config.Template.Job.Type)
		success = true
	}
	return
}
// evaluationInspector polls the Nomad evaluation identified by evalID until it
// reaches a terminal status (complete, failed or cancelled). On completion it
// logs detailed placement-failure metrics (exhausted nodes, filtered classes
// and filtered constraints) for each task group that failed to place allocs.
// It returns an error only when the evaluation info cannot be queried;
// placement failures alone do not produce an error (see GH-220).
func (l *levantDeployment) evaluationInspector(evalID *string) error {

	for {
		evalInfo, _, err := l.nomad.Evaluations().Info(*evalID, nil)
		if err != nil {
			return err
		}

		switch evalInfo.Status {
		case nomadStructs.EvalStatusComplete, nomadStructs.EvalStatusFailed, nomadStructs.EvalStatusCancelled:

			// No failed task-group allocations means the evaluation placed
			// everything it needed to.
			if len(evalInfo.FailedTGAllocs) == 0 {
				log.Info().Msgf("levant/deploy: evaluation %s finished successfully", *evalID)
				return nil
			}

			for group, metrics := range evalInfo.FailedTGAllocs {

				// Check if any nodes have been exhausted of resources and therefore are
				// unable to place allocs.
				if metrics.NodesExhausted > 0 {
					var exhausted, dimension []string
					for e := range metrics.ClassExhausted {
						exhausted = append(exhausted, e)
					}
					for d := range metrics.DimensionExhausted {
						dimension = append(dimension, d)
					}
					log.Error().Msgf("levant/deploy: task group %s failed to place allocs, failed on %v and exhausted %v",
						group, exhausted, dimension)
				}

				// Check if any node classes were filtered causing alloc placement
				// failures.
				if len(metrics.ClassFiltered) > 0 {
					for f := range metrics.ClassFiltered {
						log.Error().Msgf("levant/deploy: task group %s failed to place %v allocs as class \"%s\" was filtered",
							group, len(metrics.ClassFiltered), f)
					}
				}

				// Check if any node constraints were filtered causing alloc placement
				// failures.
				if len(metrics.ConstraintFiltered) > 0 {
					for cf := range metrics.ConstraintFiltered {
						log.Error().Msgf("levant/deploy: task group %s failed to place %v allocs as constraint \"%s\" was filtered",
							group, len(metrics.ConstraintFiltered), cf)
					}
				}
			}

			// Do not return an error here; there could well be information from
			// Nomad detailing filtered nodes but the deployment will still be
			// successful. GH-220.
			return nil

		default:
			// Evaluation still pending/blocked; poll again shortly.
			time.Sleep(1 * time.Second)
			continue
		}
	}
}
// deploymentWatcher polls Nomad for the status of deployment depID using
// blocking queries and returns true only once the deployment completes
// successfully. If canary auto-promotion is enabled, it launches the
// canaryAutoPromote goroutine wired to this watcher via two channels:
// canaryChan lets the status checker shut the promoter down, and
// deploymentChan lets the promoter tell this watcher to abort.
func (l *levantDeployment) deploymentWatcher(depID string) (success bool) {

	var canaryChan chan interface{}
	deploymentChan := make(chan interface{})

	t := time.Now()
	wt := time.Duration(5 * time.Second)

	// Setup the canaryChan and launch the autoPromote go routine if autoPromote
	// has been enabled.
	if l.config.Deploy.Canary > 0 {
		canaryChan = make(chan interface{})
		go l.canaryAutoPromote(depID, l.config.Deploy.Canary, canaryChan, deploymentChan)
	}

	q := &nomad.QueryOptions{WaitIndex: 1, AllowStale: l.config.Client.AllowStale, WaitTime: wt}

	for {
		// Blocking query: returns after wt or when the deployment changes.
		dep, meta, err := l.nomad.Deployments().Info(depID, q)
		log.Debug().Msgf("levant/deploy: deployment %v running for %.2fs", depID, time.Since(t).Seconds())

		// Listen for the deploymentChan closing which indicates Levant should exit
		// the deployment watcher. Checked before the error so a promoter-initiated
		// abort always wins.
		select {
		case <-deploymentChan:
			return false
		default:
			break
		}

		if err != nil {
			log.Error().Err(err).Msgf("levant/deploy: unable to get info of deployment %s", depID)
			return
		}

		// Stale blocking-query response; nothing changed, poll again.
		if meta.LastIndex <= q.WaitIndex {
			continue
		}

		q.WaitIndex = meta.LastIndex

		// cont is true while the deployment is still running; a non-nil error
		// means the deployment reached a failure state.
		cont, err := l.checkDeploymentStatus(dep, canaryChan)
		if err != nil {
			return false
		}

		if cont {
			continue
		} else {
			return true
		}
	}
}
// checkDeploymentStatus inspects a Nomad deployment's status and reports
// whether the watcher should keep polling. It returns (true, nil) while the
// deployment is running, (false, nil) on success, and (false, error) for any
// other status, in which case the failure inspector is launched and, when a
// canary auto-promote routine is active, it is stopped via shutdownChan.
func (l *levantDeployment) checkDeploymentStatus(dep *nomad.Deployment, shutdownChan chan interface{}) (bool, error) {

	// Still in progress; tell the watcher to continue.
	if dep.Status == nomadStructs.DeploymentStatusRunning {
		return true, nil
	}

	if dep.Status == nomadStructs.DeploymentStatusSuccessful {
		log.Info().Msgf("levant/deploy: deployment %v has completed successfully", dep.ID)
		return false, nil
	}

	// Every remaining status is treated as a failure.
	if shutdownChan != nil {
		log.Debug().Msgf("levant/deploy: deployment %v meaning canary auto promote will shutdown", dep.Status)
		close(shutdownChan)
	}
	log.Error().Msgf("levant/deploy: deployment %v has status %s", dep.ID, dep.Status)

	// Launch the failure inspector.
	l.checkFailedDeployment(&dep.ID)
	return false, fmt.Errorf("deployment failed")
}
// canaryAutoPromote handles Levant's canary-auto-promote functionality.
// It waits waitTime seconds, verifies every canary task group in the
// deployment is healthy and, if so, promotes the deployment. A health or
// promote failure closes deploymentChan so the deployment watcher aborts;
// a close of shutdownChan (by the status checker) stops this goroutine.
func (l *levantDeployment) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) {

	// Setup the AutoPromote timer.
	autoPromote := time.After(time.Duration(waitTime) * time.Second)

	for {
		select {
		case <-autoPromote:
			log.Info().Msgf("levant/deploy: auto-promote period %vs has been reached for deployment %s",
				waitTime, depID)

			// Check the deployment is healthy before promoting.
			if healthy := l.checkCanaryDeploymentHealth(depID); !healthy {
				log.Error().Msgf("levant/deploy: the canary deployment %s has unhealthy allocations, unable to promote", depID)
				close(deploymentChan)
				return
			}

			log.Info().Msgf("levant/deploy: triggering auto promote of deployment %s", depID)

			// Promote the deployment.
			_, _, err := l.nomad.Deployments().PromoteAll(depID, nil)
			if err != nil {
				log.Error().Err(err).Msgf("levant/deploy: unable to promote deployment %s", depID)
				close(deploymentChan)
				return
			}
			// time.After is one-shot, so after a successful promote the loop
			// simply blocks here until shutdownChan is closed.

		case <-shutdownChan:
			log.Info().Msg("levant/deploy: canary auto promote has been shutdown")
			return
		}
	}
}
// checkCanaryDeploymentHealth reports whether every task group that requested
// canaries in the given deployment currently has all of its canary
// allocations healthy. Task groups with no desired canaries are skipped.
func (l *levantDeployment) checkCanaryDeploymentHealth(depID string) bool {

	dep, _, err := l.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: l.config.Client.AllowStale})
	if err != nil {
		log.Error().Err(err).Msgf("levant/deploy: unable to query deployment %s for health", depID)
		return false
	}

	// Count task groups whose healthy allocation count has not yet reached
	// the number of desired canaries.
	unhealthy := 0
	for taskName, taskInfo := range dep.TaskGroups {
		// skip any task groups which are not configured for canary deployments
		if taskInfo.DesiredCanaries == 0 {
			log.Debug().Msgf("levant/deploy: task %s has no desired canaries, skipping health checks in deployment %s", taskName, depID)
			continue
		}
		if taskInfo.HealthyAllocs != taskInfo.DesiredCanaries {
			log.Error().Msgf("levant/deploy: task %s has unhealthy allocations in deployment %s", taskName, depID)
			unhealthy++
		}
	}

	if unhealthy > 0 {
		return false
	}

	log.Debug().Msgf("levant/deploy: deployment %s has 0 unhealthy allocations", depID)
	return true
}
// triggerPeriodic forces an instance of a periodic job outside of its planned
// schedule. The resulting evaluation ID can then be checked in the same
// fashion as other jobs.
func (l *levantDeployment) triggerPeriodic(jobID *string) (string, error) {

	log.Info().Msg("levant/deploy: triggering a run of periodic job")

	// Force the run and hand both the evaluation ID and any error straight
	// back; the caller performs the error checking.
	evalID, _, err := l.nomad.Jobs().PeriodicForce(*jobID, nil)
	return evalID, err
}
// getDeploymentID finds the Nomad deploymentID associated to a Nomad
// evaluationID. This is only needed as sometimes Nomad initially returns eval
// info with an empty deploymentID; and a retry is required in order to get the
// updated response from Nomad.
func (l *levantDeployment) getDeploymentID(evalID string) (depID string, err error) {

	var evalInfo *nomad.Evaluation

	// A single deadline wants a one-shot Timer, not a Ticker: the previous
	// Ticker would have fired repeatedly for no benefit.
	timeout := time.NewTimer(time.Second * 60)
	defer timeout.Stop()

	for {
		select {
		case <-timeout.C:
			err = errors.New("timeout reached on attempting to find deployment ID")
			return
		default:
			if evalInfo, _, err = l.nomad.Evaluations().Info(evalID, nil); err != nil {
				return
			}

			if evalInfo.DeploymentID != "" {
				return evalInfo.DeploymentID, nil
			}

			// Nomad has not associated a deployment yet; back off briefly
			// before asking again.
			log.Debug().Msgf("levant/deploy: Nomad returned an empty deployment for evaluation %v; retrying", evalID)
			time.Sleep(2 * time.Second)
		}
	}
}
// dynamicGroupCountUpdater takes the templated and rendered job and updates the
// group counts based on the currently deployed job; if its running. This lets
// externally scaled counts survive a redeploy instead of being reset to the
// template values.
func (l *levantDeployment) dynamicGroupCountUpdater() error {

	// Gather information about the current state, if any, of the job on the
	// Nomad cluster.
	rJob, _, err := l.nomad.Jobs().Info(*l.config.Template.Job.Name, &nomad.QueryOptions{})

	// This is a hack due to GH-1849; we check the error string for 404 which
	// indicates the job is not running, not that there was an error in the API
	// call.
	if err != nil && strings.Contains(err.Error(), "404") {
		log.Info().Msg("levant/deploy: job is not running, using template file group counts")
		return nil
	} else if err != nil {
		log.Error().Err(err).Msg("levant/deploy: unable to perform job evaluation")
		return err
	}

	// Check that the job is actually running and not in a potentially stopped
	// state.
	if *rJob.Status != nomadStructs.JobStatusRunning {
		return nil
	}

	log.Debug().Msgf("levant/deploy: running dynamic job count updater")

	// Iterate the templated job and the Nomad returned job and update group count
	// based on matches. Matching is by group name; the template group is a
	// pointer, so assigning Count mutates the job that will be registered.
	for _, rGroup := range rJob.TaskGroups {
		for _, group := range l.config.Template.Job.TaskGroups {
			if *rGroup.Name == *group.Name {
				log.Info().Msgf("levant/deploy: using dynamic count %v for group %s",
					*rGroup.Count, *group.Name)
				group.Count = rGroup.Count
			}
		}
	}
	return nil
}
// isJobZeroCount reports whether no task group in the rendered job asks for a
// positive number of allocations. A nil count on any group means the job is
// not considered zero-count.
func (l *levantDeployment) isJobZeroCount() bool {
	for _, tg := range l.config.Template.Job.TaskGroups {
		// Either an unset count or a positive count disqualifies the job.
		if tg.Count == nil || *tg.Count > 0 {
			return false
		}
	}
	return true
}
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This LevelDB Go implementation is based on LevelDB C++ implementation.
// Which contains the following header:
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LEVELDBCPP_LICENSE file. See the LEVELDBCPP_AUTHORS file
// for names of contributors.
// Package db provide implementation of LevelDB database.
package db
import (
"fmt"
"leveldb/descriptor"
"leveldb/errors"
"leveldb/iter"
"leveldb/memdb"
"leveldb/opt"
"os"
"runtime"
"strings"
"sync"
"unsafe"
)
// DB represent a database session.
type DB struct {
	s *session // session this DB operates on

	cch    chan cSignal       // compaction worker signal
	creq   chan *cReq         // compaction request
	wlock  chan struct{}      // writer mutex
	wqueue chan *Batch        // writer queue
	wack   chan error         // writer ack
	lch    chan *Batch        // log writer chan
	lack   chan error         // log writer ack
	ewg    sync.WaitGroup     // exit WaitGroup
	cstats [kNumLevels]cStats // Compaction stats

	// mem holds the current/frozen memdb state; accessed via getMem
	// (presumably swapped atomically — see unsafe.Pointer usage).
	mem unsafe.Pointer
	// log and flog are the active and frozen write-ahead log writers.
	log, flog *logWriter
	// seq is the last applied sequence number; fseq appears related to the
	// frozen state — TODO confirm against newMem/compaction code.
	seq, fseq uint64
	snaps     *snaps // open snapshots bookkeeping
	// closed is set once via setClosed; checked by isClosed/rok/wok.
	closed uint32
	// err stores a sticky error, read via geterr.
	err unsafe.Pointer
}
// Open open or create database from given desc.
func Open(desc descriptor.Descriptor, o *opt.Options) (d *DB, err error) {
	// Recover the session from the existing manifest; create a fresh database
	// only when it is missing and the caller allowed creation. An existing
	// database is an error when OFErrorIfExist is set.
	s := newSession(desc, o)
	err = s.recover()
	if os.IsNotExist(err) && o.HasFlag(opt.OFCreateIfMissing) {
		err = s.create()
	} else if err == nil && o.HasFlag(opt.OFErrorIfExist) {
		err = os.ErrExist
	}
	if err != nil {
		return
	}

	d = &DB{
		s:      s,
		cch:    make(chan cSignal),
		creq:   make(chan *cReq),
		wlock:  make(chan struct{}, 1), // capacity 1: used as a writer mutex
		wqueue: make(chan *Batch),
		wack:   make(chan error),
		lch:    make(chan *Batch),
		lack:   make(chan error),
		seq:    s.stSeq,
		snaps:  newSnaps(),
	}

	// Replay outstanding write-ahead logs so the memdb reflects all writes
	// committed after the last manifest update.
	err = d.recoverLog()
	if err != nil {
		return
	}

	// remove any obsolete files
	d.cleanFiles()

	// Start the background compaction and log-writer goroutines.
	go d.compaction()
	go d.writeLog()

	// wait for compaction goroutine
	d.cch <- cWait

	return
}
// recoverLog replays every write-ahead log file at or above the session's
// recorded log number, rebuilding memdbs and flushing them to tables.
// NOTE(review): the interplay of mb/cm/fr below appears to preserve the
// invariant that a log file is only removed after its contents have been
// committed — confirm against the manifest-commit semantics of cMem.
func (d *DB) recoverLog() (err error) {
	s := d.s
	icmp := s.cmp

	s.printf("LogRecovery: started, min=%d", s.stLogNum)

	mb := new(memBatch)
	cm := newCMem(s)

	// Sort the log files and count how many predate the recorded log number;
	// those are skipped, while newer file numbers are marked as in use.
	logs, skip := files(s.getFiles(descriptor.TypeLog)), 0
	logs.sort()
	for _, log := range logs {
		if log.Number() < s.stLogNum {
			skip++
			continue
		}
		s.markFileNum(log.Number())
	}

	// r is the reader for the current log; fr holds the fully-replayed
	// previous log, kept until its contents are committed.
	var r, fr *logReader
	for _, log := range logs[skip:] {
		s.printf("LogRecovery: recovering, num=%d", log.Number())

		r, err = newLogReader(log, true, s.logDropFunc("log", log.Number()))
		if err != nil {
			return
		}

		// Before replaying this log, flush and commit the memdb built from
		// the previous one, then remove that previous log file.
		if mb.mem != nil {
			if mb.mem.Len() > 0 {
				err = cm.flush(mb.mem, 0)
				if err != nil {
					return
				}
			}
			err = cm.commit(r.file.Number(), d.seq)
			if err != nil {
				return
			}
			cm.reset()
			fr.remove()
			fr = nil
		}

		// Replay this log's records into a fresh memdb, spilling to tables
		// whenever the configured write buffer size is exceeded.
		mb.mem = memdb.New(icmp)
		for r.log.Next() {
			d.seq, err = replayBatch(r.log.Record(), mb)
			if err != nil {
				return
			}
			if mb.mem.Size() > s.o.GetWriteBuffer() {
				// flush to table
				err = cm.flush(mb.mem, 0)
				if err != nil {
					return
				}
				// create new memdb
				mb.mem = memdb.New(icmp)
			}
		}

		err = r.log.Error()
		if err != nil {
			return
		}

		r.close()
		fr = r
	}

	// create new log
	_, err = d.newMem()
	if err != nil {
		return
	}

	// Flush any residue from the final replayed log, then commit against the
	// newly created log file's number.
	if mb.mem != nil && mb.mem.Len() > 0 {
		err = cm.flush(mb.mem, 0)
		if err != nil {
			return
		}
	}
	err = cm.commit(d.log.file.Number(), d.seq)
	if err != nil {
		return
	}

	// The last replayed log is now safely committed and can be removed.
	if fr != nil {
		fr.remove()
	}

	return
}
// get looks key up as of snapshot sequence number seq: first in the current
// and frozen memdbs, then in the table files of the current version. A
// deletion marker yields errors.ErrNotFound. If the version lookup reports
// that compaction is warranted, one is scheduled without blocking the read.
func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
	s := d.s
	ucmp := s.cmp.cmp

	ikey := newIKey(key, seq, tSeek)

	// memGet reports whether m conclusively answers the lookup: true when it
	// holds the newest entry for key (a value or a tombstone), false when the
	// caller must look elsewhere.
	memGet := func(m *memdb.DB) bool {
		var k []byte
		k, value, err = m.Get(ikey)
		if err != nil {
			return false
		}
		ik := iKey(k)
		// The memdb may return a nearby key; confirm the user key matches.
		if ucmp.Compare(ik.ukey(), key) != 0 {
			return false
		}
		if _, t, ok := ik.parseNum(); ok {
			if t == tDel {
				// Tombstone: the key was deleted at or before seq.
				value = nil
				err = errors.ErrNotFound
			}
			return true
		}
		return false
	}

	mem := d.getMem()
	if memGet(mem.cur) || (mem.froze != nil && memGet(mem.froze)) {
		return
	}

	value, cState, err := s.version().get(ikey, ro)

	if cState && !d.isClosed() {
		// schedule compaction
		select {
		case d.cch <- cSched:
		default:
			// A signal is already pending; never block a read on this.
		}
	}

	return
}
// Get get value for given key of the latest snapshot of database.
func (d *DB) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
	// Refuse the read when the database is not in a readable state.
	if err := d.rok(); err != nil {
		return nil, err
	}
	// Read against the most recent sequence number.
	return d.get(key, d.getSeq(), ro)
}
// NewIterator return an iterator over the contents of the latest snapshot of
// database. The result of NewIterator() is initially invalid (caller must
// call Next or one of Seek method, ie First, Last or Seek).
func (d *DB) NewIterator(ro *opt.ReadOptions) iter.Iterator {
	p := d.newSnapshot()
	i := p.NewIterator(ro)
	x, ok := i.(*Iterator)
	if ok {
		// Tie the snapshot's lifetime to the iterator: release the snapshot
		// when the iterator is garbage collected.
		runtime.SetFinalizer(x, func(x *Iterator) {
			p.Release()
		})
	} else {
		// No finalizable iterator was produced; release the snapshot now so
		// it is not leaked.
		p.Release()
	}
	return i
}
// GetSnapshot return a handle to the current DB state.
// Iterators created with this handle will all observe a stable snapshot
// of the current DB state. The caller must call *Snapshot.Release() when the
// snapshot is no longer needed.
func (d *DB) GetSnapshot() (*Snapshot, error) {
	if err := d.rok(); err != nil {
		return nil, err
	}

	snap := d.newSnapshot()
	// Safety net: release the snapshot when it is garbage collected in case
	// the caller forgets to.
	runtime.SetFinalizer(snap, func(x *Snapshot) {
		x.Release()
	})
	return snap, nil
}
// GetProperty used to query exported database state.
//
// Valid property names include:
//
//  "leveldb.num-files-at-level<N>" - return the number of files at level <N>,
//     where <N> is an ASCII representation of a level number (e.g. "0").
//  "leveldb.stats" - returns a multi-line string that describes statistics
//     about the internal operation of the DB.
//  "leveldb.sstables" - returns a multi-line string that describes all
//     of the sstables that make up the db contents.
func (d *DB) GetProperty(prop string) (value string, err error) {
	err = d.rok()
	if err != nil {
		return
	}

	const prefix = "leveldb."
	if !strings.HasPrefix(prop, prefix) {
		return "", errors.ErrInvalid("unknown property: " + prop)
	}

	p := prop[len(prefix):]

	switch s := d.s; true {
	case strings.HasPrefix(p, "num-files-at-level"):
		var level uint
		var rest string
		// BUG FIX: this previously used fmt.Scanf, which reads from os.Stdin.
		// The level number must be parsed from the property name suffix.
		// Exactly one item (the level) must parse, with no trailing text.
		n, _ := fmt.Sscanf(p[len("num-files-at-level"):], "%d%s", &level, &rest)
		if n != 1 || level >= kNumLevels {
			return "", errors.ErrInvalid("invalid property: " + prop)
		}
		value = fmt.Sprint(s.version().tLen(int(level)))
	case p == "stats":
		// Render a per-level compaction summary table.
		v := s.version()
		value = "Compactions\n" +
			" Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)\n" +
			"-------+------------+---------------+---------------+---------------+---------------\n"
		for level, tt := range v.tables {
			duration, read, write := d.cstats[level].get()
			if len(tt) == 0 && duration == 0 {
				continue
			}
			value += fmt.Sprintf(" %3d   | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
				level, len(tt), float64(tt.size())/1048576.0, duration.Seconds(),
				float64(read)/1048576.0, float64(write)/1048576.0)
		}
	case p == "sstables":
		// List every table file per level with its key range.
		v := s.version()
		for level, tt := range v.tables {
			value += fmt.Sprintf("--- level %d ---\n", level)
			for _, t := range tt {
				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Number(), t.size, t.min, t.max)
			}
		}
	default:
		return "", errors.ErrInvalid("unknown property: " + prop)
	}

	return
}
// GetApproximateSizes calculate approximate sizes of given ranges.
//
// Note that the returned sizes measure file system space usage, so
// if the user data compresses by a factor of ten, the returned
// sizes will be one-tenth the size of the corresponding user data size.
//
// The results may not include the sizes of recently written data.
func (d *DB) GetApproximateSizes(rr []Range) (Sizes, error) {
	if err := d.rok(); err != nil {
		return nil, err
	}

	v := d.s.version()
	sizes := make(Sizes, 0, len(rr))
	for _, r := range rr {
		// Map both range ends to internal keys at the maximum sequence
		// number and take the difference of their offsets in the version.
		min := newIKey(r.Start, kMaxSeq, tSeek)
		max := newIKey(r.Limit, kMaxSeq, tSeek)
		start, err := v.approximateOffsetOf(min)
		if err != nil {
			return nil, err
		}
		limit, err := v.approximateOffsetOf(max)
		if err != nil {
			return nil, err
		}
		// Guard against inverted ranges: report zero instead of underflowing.
		size := uint64(0)
		if limit >= start {
			size = limit - start
		}
		sizes = append(sizes, size)
	}

	return sizes, nil
}
// CompactRange compact the underlying storage for the key range.
//
// In particular, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// Range.Start==nil is treated as a key before all keys in the database.
// Range.Limit==nil is treated as a key after all keys in the database.
// Therefore calling with Start==nil and Limit==nil will compact entire
// database.
func (d *DB) CompactRange(r Range) error {
	if err := d.wok(); err != nil {
		return err
	}

	// Hand the request to the compaction goroutine (level -1 lets the
	// compactor pick the level), then block until compaction settles.
	d.creq <- &cReq{level: -1, min: r.Start, max: r.Limit}
	d.cch <- cWait

	return d.wok()
}
// Close closes the database. Snapshot and iterator are invalid
// after this call. It drains queued writers, stops the background log-writer
// and compaction goroutines, purges caches and closes the log and manifest
// files, returning any stored background error.
func (d *DB) Close() error {
	// setClosed succeeds exactly once; a second Close reports ErrClosed.
	if !d.setClosed() {
		return errors.ErrClosed
	}

	// Acquire the writer lock so no new batch can enter, then fail every
	// writer that is already queued.
	d.wlock <- struct{}{}
drain:
	for {
		select {
		case <-d.wqueue:
			d.wack <- errors.ErrClosed
		default:
			break drain
		}
	}
	close(d.wlock)

	// wake log writer goroutine
	d.lch <- nil

	// wake Compaction goroutine
	d.cch <- cClose

	// wait for the WaitGroup
	d.ewg.Wait()

	// Drop cached table handles and any configured block-cache contents.
	d.s.tops.purgeCache()
	cache := d.s.o.GetBlockCache()
	if cache != nil {
		cache.Purge(nil)
	}

	if d.log != nil {
		d.log.close()
	}
	if d.s.manifest != nil {
		d.s.manifest.close()
	}

	runtime.GC()

	return d.geterr()
}
// db: introduce Recover(); allows recovering database with missing or corrupted manifest
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This LevelDB Go implementation is based on LevelDB C++ implementation.
// Which contains the following header:
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LEVELDBCPP_LICENSE file. See the LEVELDBCPP_AUTHORS file
// for names of contributors.
// Package db provide implementation of LevelDB database.
package db
import (
"fmt"
"leveldb/descriptor"
"leveldb/errors"
"leveldb/iter"
"leveldb/memdb"
"leveldb/opt"
"os"
"runtime"
"strings"
"sync"
"unsafe"
)
// DB represent a database session.
type DB struct {
	s *session // backing session: options, file numbers, versions, manifest

	cch    chan cSignal       // compaction worker signal
	creq   chan *cReq         // compaction request
	wlock  chan struct{}      // writer mutex
	wqueue chan *Batch        // writer queue
	wack   chan error         // writer ack
	lch    chan *Batch        // log writer chan
	lack   chan error         // log writer ack
	ewg    sync.WaitGroup     // exit WaitGroup
	cstats [kNumLevels]cStats // Compaction stats

	mem       unsafe.Pointer // memdb state; read via getMem (unsafe.Pointer suggests atomic swap — confirm)
	log, flog *logWriter     // active and frozen write-ahead log writers
	seq, fseq uint64         // seq: latest sequence number; fseq presumably the flushed one — TODO confirm
	snaps     *snaps         // registry of live snapshots
	closed    uint32         // nonzero once Close has been called (see setClosed/isClosed)
	err       unsafe.Pointer // sticky background error, read via geterr
}
// open builds a DB on top of an already recovered (or freshly created)
// session: it replays outstanding write-ahead logs, removes obsolete files,
// and starts the background compaction and log-writer goroutines.
func open(s *session) (d *DB, err error) {
	d = &DB{
		s:      s,
		cch:    make(chan cSignal),
		creq:   make(chan *cReq),
		wlock:  make(chan struct{}, 1), // capacity 1: used as a mutex
		wqueue: make(chan *Batch),
		wack:   make(chan error),
		lch:    make(chan *Batch),
		lack:   make(chan error),
		seq:    s.stSeq,
		snaps:  newSnaps(),
	}
	err = d.recoverLog()
	if err != nil {
		return
	}
	// remove any obsolete files
	d.cleanFiles()
	go d.compaction()
	go d.writeLog()
	// wait for compaction goroutine
	d.cch <- cWait
	return
}
// Open open or create database from given desc.
func Open(desc descriptor.Descriptor, o *opt.Options) (*DB, error) {
	s := newSession(desc, o)
	// Recover the existing manifest; translate the outcome according to
	// the create/exist option flags.
	err := s.recover()
	switch {
	case os.IsNotExist(err) && o.HasFlag(opt.OFCreateIfMissing):
		err = s.create()
	case err == nil && o.HasFlag(opt.OFErrorIfExist):
		err = os.ErrExist
	}
	if err != nil {
		return nil, err
	}
	return open(s)
}
// Recover recover database with missing or corrupted manifest file. It will
// ignore any manifest files, valid or not.
//
// It rebuilds a fresh manifest by scanning every table file on the
// descriptor, placing all of them at level 0, and deriving the sequence
// number from the newest table.
func Recover(desc descriptor.Descriptor, o *opt.Options) (d *DB, err error) {
	s := newSession(desc, o)
	// get all files
	ff := files(s.getFiles(descriptor.TypeAll))
	ff.sort()
	s.printf("Recover: started, files=%d", len(ff))
	rec := new(sessionRecord)
	// recover tables
	ro := &opt.ReadOptions{}
	var nt *tFile
	for _, f := range ff {
		if f.Type() != descriptor.TypeTable {
			continue
		}
		var size uint64
		size, err = f.Size()
		if err != nil {
			return
		}
		t := newTFile(f, size, nil, nil)
		iter := s.tops.newIterator(t, ro)
		// min ikey
		if iter.First() {
			t.min = iter.Key()
		} else if iter.Error() != nil {
			err = iter.Error()
			return
		} else {
			// Empty table: skip it entirely.
			continue
		}
		// max ikey
		if iter.Last() {
			t.max = iter.Key()
		} else if iter.Error() != nil {
			err = iter.Error()
			return
		} else {
			continue
		}
		// add table to level 0
		rec.addTableFile(0, t)
		nt = t
	}
	// extract largest seq number from newest table
	if nt != nil {
		var lseq uint64
		iter := s.tops.newIterator(nt, ro)
		for iter.Next() {
			seq, _, ok := iKey(iter.Key()).parseNum()
			if !ok {
				continue
			}
			if seq > lseq {
				lseq = seq
			}
		}
		rec.setSeq(lseq)
	}
	// Set next file number past the largest one seen.
	// BUG FIX: the original indexed ff[len(ff)-1] unconditionally, which
	// panics with an index-out-of-range when the descriptor has no files.
	if len(ff) > 0 {
		s.stFileNum = ff[len(ff)-1].Number() + 1
	}
	// create brand new manifest
	err = s.create()
	if err != nil {
		return
	}
	// commit record
	err = s.commit(rec)
	if err != nil {
		return
	}
	return open(s)
}
// recoverLog replays all write-ahead log files at or after the session's
// recorded log number, flushing their contents into new level-0 tables,
// and finally installs a fresh, empty log for the reopened database.
func (d *DB) recoverLog() (err error) {
	s := d.s
	icmp := s.cmp
	s.printf("LogRecovery: started, min=%d", s.stLogNum)
	mb := new(memBatch)
	cm := newCMem(s)
	// Logs older than stLogNum were already flushed; count how many of
	// the sorted list to skip, and register the numbers of the rest.
	logs, skip := files(s.getFiles(descriptor.TypeLog)), 0
	logs.sort()
	for _, log := range logs {
		if log.Number() < s.stLogNum {
			skip++
			continue
		}
		s.markFileNum(log.Number())
	}
	// r is the reader for the current log; fr holds the previous,
	// fully replayed log so it can be removed once its contents are
	// durably committed.
	var r, fr *logReader
	for _, log := range logs[skip:] {
		s.printf("LogRecovery: recovering, num=%d", log.Number())
		r, err = newLogReader(log, true, s.logDropFunc("log", log.Number()))
		if err != nil {
			return
		}
		// A non-nil memdb means the previous log was replayed; flush
		// what is left and commit before starting on the next log.
		if mb.mem != nil {
			if mb.mem.Len() > 0 {
				err = cm.flush(mb.mem, 0)
				if err != nil {
					return
				}
			}
			// Commit with the new log's number, presumably marking
			// all earlier logs as flushed — confirm against session.
			err = cm.commit(r.file.Number(), d.seq)
			if err != nil {
				return
			}
			cm.reset()
			fr.remove()
			fr = nil
		}
		mb.mem = memdb.New(icmp)
		for r.log.Next() {
			d.seq, err = replayBatch(r.log.Record(), mb)
			if err != nil {
				return
			}
			// Bound memory use during replay: spill to a table once
			// the memdb exceeds the configured write buffer.
			if mb.mem.Size() > s.o.GetWriteBuffer() {
				// flush to table
				err = cm.flush(mb.mem, 0)
				if err != nil {
					return
				}
				// create new memdb
				mb.mem = memdb.New(icmp)
			}
		}
		err = r.log.Error()
		if err != nil {
			return
		}
		r.close()
		fr = r
	}
	// create new log
	_, err = d.newMem()
	if err != nil {
		return
	}
	// Flush anything left over from the final log.
	if mb.mem != nil && mb.mem.Len() > 0 {
		err = cm.flush(mb.mem, 0)
		if err != nil {
			return
		}
	}
	err = cm.commit(d.log.file.Number(), d.seq)
	if err != nil {
		return
	}
	// The last replayed log is only removed after the commit above
	// succeeded, so a crash here cannot lose data.
	if fr != nil {
		fr.remove()
	}
	return
}
// get looks up key as of sequence number seq, consulting the current and
// frozen memtables first and falling back to the table files of the
// current version.
func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
	s := d.s
	ucmp := s.cmp.cmp
	// Internal seek key: sorts at the newest entry for key visible at seq.
	ikey := newIKey(key, seq, tSeek)
	// memGet reports whether m holds an authoritative answer for key
	// (a value or a deletion marker). value/err are set through the
	// closure as a side effect.
	memGet := func(m *memdb.DB) bool {
		var k []byte
		k, value, err = m.Get(ikey)
		if err != nil {
			return false
		}
		ik := iKey(k)
		// The returned key may be for a different user key; verify it
		// is actually ours before trusting the value.
		if ucmp.Compare(ik.ukey(), key) != 0 {
			return false
		}
		if _, t, ok := ik.parseNum(); ok {
			if t == tDel {
				// Deletion marker: the key is authoritatively absent.
				value = nil
				err = errors.ErrNotFound
			}
			return true
		}
		return false
	}
	mem := d.getMem()
	if memGet(mem.cur) || (mem.froze != nil && memGet(mem.froze)) {
		return
	}
	// Not in the memtables; read from the table files. cState signals
	// that the read pattern makes a compaction worthwhile.
	value, cState, err := s.version().get(ikey, ro)
	if cState && !d.isClosed() {
		// schedule compaction; non-blocking so a busy compactor is
		// simply left alone.
		select {
		case d.cch <- cSched:
		default:
		}
	}
	return
}
// Get get value for given key of the latest snapshot of database.
func (d *DB) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
	// Reject reads on a closed or errored database up front.
	if err := d.rok(); err != nil {
		return nil, err
	}
	return d.get(key, d.getSeq(), ro)
}
// NewIterator return an iterator over the contents of the latest snapshot of
// database. The result of NewIterator() is initially invalid (caller must
// call Next or one of Seek method, ie First, Last or Seek).
func (d *DB) NewIterator(ro *opt.ReadOptions) iter.Iterator {
	// The iterator reads through an implicit snapshot so it observes a
	// stable view of the database.
	p := d.newSnapshot()
	i := p.NewIterator(ro)
	x, ok := i.(*Iterator)
	if ok {
		// Release the snapshot when the iterator is garbage
		// collected, since this API gives the caller no explicit
		// handle on the snapshot.
		runtime.SetFinalizer(x, func(x *Iterator) {
			p.Release()
		})
	} else {
		// Not the concrete *Iterator type (presumably an error
		// iterator — confirm): nothing to attach a finalizer to,
		// so release the snapshot immediately.
		p.Release()
	}
	return i
}
// GetSnapshot return a handle to the current DB state.
// Iterators created with this handle will all observe a stable snapshot
// of the current DB state. The caller must call *Snapshot.Release() when the
// snapshot is no longer needed.
func (d *DB) GetSnapshot() (*Snapshot, error) {
	if err := d.rok(); err != nil {
		return nil, err
	}
	snap := d.newSnapshot()
	// Safety net: release the snapshot on GC if the caller forgets.
	runtime.SetFinalizer(snap, func(x *Snapshot) { x.Release() })
	return snap, nil
}
// GetProperty used to query exported database state.
//
// Valid property names include:
//
//  "leveldb.num-files-at-level<N>" - return the number of files at level <N>,
//     where <N> is an ASCII representation of a level number (e.g. "0").
//  "leveldb.stats" - returns a multi-line string that describes statistics
//     about the internal operation of the DB.
//  "leveldb.sstables" - returns a multi-line string that describes all
//     of the sstables that make up the db contents.
func (d *DB) GetProperty(prop string) (value string, err error) {
	err = d.rok()
	if err != nil {
		return
	}
	const prefix = "leveldb."
	if !strings.HasPrefix(prop, prefix) {
		return "", errors.ErrInvalid("unknown property: " + prop)
	}
	p := prop[len(prefix):]
	switch s := d.s; true {
	case strings.HasPrefix(p, "num-files-at-level"):
		var level uint
		var rest string
		// BUG FIX: the original used fmt.Scanf, which reads from
		// os.Stdin; the level must be parsed out of the property
		// name itself with Sscanf.
		n, _ := fmt.Sscanf(p[len("num-files-at-level"):], "%d%s", &level, &rest)
		// Exactly one item (the level number, no trailing junk) must
		// parse, and the level must be in range.
		if n != 1 || level >= kNumLevels {
			return "", errors.ErrInvalid("invalid property: " + prop)
		}
		value = fmt.Sprint(s.version().tLen(int(level)))
	case p == "stats":
		v := s.version()
		value = "Compactions\n" +
			" Level |   Tables   |    Size(MB)   |   Time(sec)   |    Read(MB)   |   Write(MB)\n" +
			"-------+------------+---------------+---------------+---------------+---------------\n"
		for level, tt := range v.tables {
			duration, read, write := d.cstats[level].get()
			// Skip levels that never held tables nor compacted.
			if len(tt) == 0 && duration == 0 {
				continue
			}
			value += fmt.Sprintf(" %3d   | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
				level, len(tt), float64(tt.size())/1048576.0, duration.Seconds(),
				float64(read)/1048576.0, float64(write)/1048576.0)
		}
	case p == "sstables":
		v := s.version()
		for level, tt := range v.tables {
			value += fmt.Sprintf("--- level %d ---\n", level)
			for _, t := range tt {
				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Number(), t.size, t.min, t.max)
			}
		}
	default:
		return "", errors.ErrInvalid("unknown property: " + prop)
	}
	return
}
// GetApproximateSizes calculate approximate sizes of given ranges.
//
// Note that the returned sizes measure file system space usage, so
// if the user data compresses by a factor of ten, the returned
// sizes will be one-tenth the size of the corresponding user data size.
//
// The results may not include the sizes of recently written data.
func (d *DB) GetApproximateSizes(rr []Range) (Sizes, error) {
	if err := d.rok(); err != nil {
		return nil, err
	}
	v := d.s.version()
	result := make(Sizes, 0, len(rr))
	for _, r := range rr {
		// Offsets of the internal seek keys bounding the range.
		lo, err := v.approximateOffsetOf(newIKey(r.Start, kMaxSeq, tSeek))
		if err != nil {
			return nil, err
		}
		hi, err := v.approximateOffsetOf(newIKey(r.Limit, kMaxSeq, tSeek))
		if err != nil {
			return nil, err
		}
		// Clamp to zero if the offsets came back inverted.
		var sz uint64
		if hi >= lo {
			sz = hi - lo
		}
		result = append(result, sz)
	}
	return result, nil
}
// CompactRange compact the underlying storage for the key range.
//
// In particular, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// Range.Start==nil is treated as a key before all keys in the database.
// Range.Limit==nil is treated as a key after all keys in the database.
// Therefore calling with Start==nil and Limit==nil will compact entire
// database.
func (d *DB) CompactRange(r Range) error {
	if err := d.wok(); err != nil {
		return err
	}
	// Submit a manual compaction request (level -1 lets the compactor
	// choose levels from the range), then block until it is idle.
	d.creq <- &cReq{level: -1, min: r.Start, max: r.Limit}
	d.cch <- cWait
	return d.wok()
}
// Close closes the database. Snapshot and iterator are invalid
// after this call
func (d *DB) Close() error {
	// setClosed flips the closed flag exactly once; a concurrent or
	// repeated Close gets ErrClosed.
	if !d.setClosed() {
		return errors.ErrClosed
	}
	// Grab the writer lock so no new writes start, then fail every
	// writer still waiting in the queue.
	d.wlock <- struct{}{}
drain:
	for {
		select {
		case <-d.wqueue:
			d.wack <- errors.ErrClosed
		default:
			break drain
		}
	}
	close(d.wlock)
	// wake log writer goroutine
	d.lch <- nil
	// wake Compaction goroutine
	d.cch <- cClose
	// wait for the WaitGroup
	d.ewg.Wait()
	// Drop cached table handles and, if configured, cached blocks.
	d.s.tops.purgeCache()
	cache := d.s.o.GetBlockCache()
	if cache != nil {
		cache.Purge(nil)
	}
	if d.log != nil {
		d.log.close()
	}
	if d.s.manifest != nil {
		d.s.manifest.close()
	}
	runtime.GC()
	// Surface any sticky background error recorded during the session.
	return d.geterr()
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
"fmt"
"math/rand"
"time"
"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
)
const (
	// Poll interval and overall timeout, presumably shared by the
	// deployment wait/retry helpers in this suite — confirm usages.
	dRetryPeriod  = 2 * time.Second
	dRetryTimeout = 5 * time.Minute
)

var (
	// nilRs is a typed nil *ReplicaSet; comparing against it (rather
	// than untyped nil) keeps gomega's Equal matcher type-correct.
	nilRs *extensions.ReplicaSet
)
// Ginkgo suite covering Deployment end-to-end behavior. Each It delegates
// to a test function defined below; the client and namespace are refreshed
// per spec by the BeforeEach hook.
var _ = SIGDescribe("Deployment", func() {
	var ns string
	var c clientset.Interface

	// Dump deployment/replica-set/pod state after each spec to aid
	// debugging failures.
	AfterEach(func() {
		failureTrap(c, ns)
	})

	f := framework.NewDefaultFramework("deployment")

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	It("deployment reaping should cascade to its replica sets and pods", func() {
		testDeleteDeployment(f)
	})
	It("RollingUpdateDeployment should delete old pods and create new ones", func() {
		testRollingUpdateDeployment(f)
	})
	It("RecreateDeployment should delete old pods and create new ones", func() {
		testRecreateDeployment(f)
	})
	It("deployment should delete old replica sets", func() {
		testDeploymentCleanUpPolicy(f)
	})
	It("deployment should support rollover", func() {
		testRolloverDeployment(f)
	})
	It("deployment should support rollback", func() {
		testRollbackDeployment(f)
	})
	It("deployment should support rollback when there's replica set with no revision", func() {
		testRollbackDeploymentRSNoRevision(f)
	})
	It("deployment should label adopted RSs and pods", func() {
		testDeploymentLabelAdopted(f)
	})
	It("scaled rollout deployment should not block on annotation check", func() {
		testScaledRolloutDeployment(f)
	})
	It("overlapping deployment should not fight with each other", func() {
		testOverlappingDeployment(f)
	})
	It("lack of progress should be reported in the deployment status", func() {
		testFailedDeployment(f)
	})
	It("iterative rollouts should eventually progress", func() {
		testIterativeDeployments(f)
	})
	It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
		testDeploymentsControllerRef(f)
	})
	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
	// See https://github.com/kubernetes/kubernetes/issues/29229
})
// failureTrap logs the state of every Deployment (with its ReplicaSets and
// Pods) in the namespace; if no Deployments exist it falls back to logging
// all ReplicaSets and their Pods. Intended to run from AfterEach on failure.
func failureTrap(c clientset.Interface, ns string) {
	deployments, err := c.Extensions().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	if err != nil {
		framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
		return
	}
	for i := range deployments.Items {
		d := deployments.Items[i]
		framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
		if err != nil {
			framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
			return
		}
		testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
		rsList := allOldRSs
		if newRS != nil {
			rsList = append(rsList, newRS)
		}
		testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
	}
	// We need print all the ReplicaSets if there are no Deployment object created
	if len(deployments.Items) != 0 {
		return
	}
	framework.Logf("Log out all the ReplicaSets if there is no deployment created")
	rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	if err != nil {
		framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
		return
	}
	for _, rs := range rss.Items {
		framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
		if err != nil {
			framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
			// BUG FIX: previously fell through and dereferenced the
			// nil selector below.
			continue
		}
		options := metav1.ListOptions{LabelSelector: selector.String()}
		podList, err := c.Core().Pods(rs.Namespace).List(options)
		// BUG FIX: the List error was silently ignored, so a failed
		// call could panic on a nil podList.
		if err != nil {
			framework.Logf("Could not list Pods of ReplicaSet %q: %v", rs.Name, err)
			continue
		}
		for _, pod := range podList.Items {
			framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
		}
	}
}
// intOrStrP returns a pointer to an IntOrString wrapping num.
func intOrStrP(num int) *intstr.IntOrString {
	// FIX: the original named this local "intstr", shadowing the
	// imported intstr package within the function.
	v := intstr.FromInt(num)
	return &v
}
// newDeploymentRollback builds a DeploymentRollback targeting the given
// revision of the named deployment, carrying the supplied annotations.
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
	rollback := extensions.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
		RollbackTo:         extensions.RollbackConfig{Revision: revision},
	}
	return &rollback
}
// checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
//
// It asserts (via gomega) that the deployment and its new ReplicaSet carry
// the given revision annotation, and — when imageName is non-empty — that
// both pod templates use the given container name and image. Returns the
// fetched deployment and its new ReplicaSet for further inspection.
func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	// Check revision of the new replica set of this deployment
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	// Compare against a typed nil so gomega's Equal matcher is type-correct.
	Expect(newRS).NotTo(Equal(nilRs))
	Expect(newRS.Annotations).NotTo(Equal(nil))
	Expect(newRS.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	// Check revision of This deployment
	Expect(deployment.Annotations).NotTo(Equal(nil))
	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	if len(imageName) > 0 {
		// Check the image the new replica set creates
		Expect(newRS.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(newRS.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
		// Check the image the deployment creates
		Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
	}
	return deployment, newRS
}
// stopDeployment deletes the named deployment via the kubectl reaper and
// then verifies that the deployment, its ReplicaSets, and (eventually) its
// Pods are all gone. Fails the spec on any unexpected state.
func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Deleting deployment %s", deploymentName)
	// The reaper performs cascading deletion of the deployment.
	reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
	Expect(err).NotTo(HaveOccurred())
	timeout := 1 * time.Minute
	err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
	// A NotFound error here is the success condition.
	_, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
	Expect(err).To(HaveOccurred())
	Expect(errors.IsNotFound(err)).To(BeTrue())
	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := metav1.ListOptions{LabelSelector: selector.String()}
	rss, err := c.Extensions().ReplicaSets(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(rss.Items).Should(HaveLen(0))
	framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
	var pods *v1.PodList
	// Poll until the selector matches no pods; pod deletion is
	// asynchronous relative to RS deletion.
	if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		pods, err = c.Core().Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		// Pods may be created by overlapping deployments right after this deployment is deleted, ignore them
		if len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	}); err != nil {
		framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
	}
}
// testDeleteDeployment creates a simple one-replica deployment, waits for
// it to become healthy at revision 1, and then verifies that reaping it
// cascades to its ReplicaSets and Pods (via stopDeployment).
func testDeleteDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	internalClient := f.InternalClientset

	deploymentName := "test-new-deployment"
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)
	framework.Logf("Creating simple deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	// The last-applied annotation must not be propagated to the RS.
	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	// A new ReplicaSet must exist before deletion is exercised.
	Expect(newRS).NotTo(Equal(nilRs))
	stopDeployment(c, internalClient, ns, deploymentName)
}
// testRollingUpdateDeployment verifies that a deployment adopts an existing
// ReplicaSet, continues its revision numbering from the adopted RS's
// annotation, and rolls its pods over to the new template.
func testRollingUpdateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rsPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  NginxImageName,
	}

	rsName := "test-rolling-update-controller"
	replicas := int32(1)
	// Pre-set a revision annotation so adoption must continue from it.
	rsRevision := "3546343826724305832"
	annotations := make(map[string]string)
	annotations[deploymentutil.RevisionAnnotation] = rsRevision
	rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
	rs.Annotations = annotations
	framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %s", err)

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-rolling-update-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 3546343826724305833.
	framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	// There should be 1 old RS (nginx-controller, which is adopted)
	framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(allOldRSs)).Should(Equal(1))
	// The old RS should contain pod-template-hash in its selector, label, and template label
	Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}
// testRecreateDeployment verifies the Recreate strategy: after a template
// change, old pods must be fully terminated before any new pod starts.
func testRecreateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	// Create a deployment that brings up redis pods.
	deploymentName := "test-recreate-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting deployment %q to complete", deploymentName)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	// Update deployment to delete redis pods and bring up nginx pods.
	framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = NginxImageName
		update.Spec.Template.Spec.Containers[0].Image = NginxImage
	})
	Expect(err).NotTo(HaveOccurred())

	// WatchRecreateDeployment asserts the no-overlap property of Recreate.
	framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
	Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred())
}
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
// (revisionHistoryLimit): old ReplicaSets beyond the limit are deleted. A
// watch on pods additionally asserts that exactly one new pod is created.
func testDeploymentCleanUpPolicy(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  NginxImageName,
	}
	rsName := "test-cleanup-controller"
	replicas := int32(1)
	revisionHistoryLimit := utilpointer.Int32Ptr(0)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-cleanup-deployment"
	framework.Logf("Creating deployment %s", deploymentName)

	pods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)

	// Start the watch from the resource version of the List so no pod
	// creations are missed between List and Watch.
	options := metav1.ListOptions{
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	w, err := c.Core().Pods(ns).Watch(options)
	Expect(err).NotTo(HaveOccurred())
	go func() {
		// There should be only one pod being created, which is the pod with the redis image.
		// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
		numPodCreation := 1
		for {
			select {
			case event, ok := <-w.ResultChan():
				// BUG FIX: the original ignored the ok flag; once the
				// watch channel closed, the goroutine spun forever
				// receiving zero-value events.
				if !ok {
					return
				}
				if event.Type != watch.Added {
					continue
				}
				numPodCreation--
				if numPodCreation < 0 {
					framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
				}
				pod, ok := event.Object.(*v1.Pod)
				if !ok {
					framework.Failf("Expect event Object to be a pod")
				}
				if pod.Spec.Containers[0].Name != RedisImageName {
					framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
				}
			case <-stopCh:
				return
			}
		}
	}()
	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.RevisionHistoryLimit = revisionHistoryLimit
	_, err = c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
	err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
}
// testRolloverDeployment tests that deployment supports rollover.
// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
func testRolloverDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "rollover-pod"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  NginxImageName,
	}

	rsName := "test-rollover-controller"
	rsReplicas := int32(1)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Wait for replica set to become ready before adopting it.
	framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
	Expect(framework.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())

	// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
	// We use a nonexistent image here, so that we make sure it won't finish
	deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
	deploymentReplicas := int32(1)
	deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %q", deploymentName)
	newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
		MaxUnavailable: intOrStrP(0),
		MaxSurge:       intOrStrP(1),
	}
	// MinReadySeconds keeps the rollout slow enough to observe.
	newDeployment.Spec.MinReadySeconds = int32(10)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the pods were scaled up and down as expected.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
	// BUG FIX: this error was assigned but never checked.
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 1 correctly
	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
	_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	framework.Logf("Ensure that both replica sets have 1 created replica")
	oldRS, err := c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(oldRS, int32(1))
	newRS, err = c.Extensions().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(newRS, int32(1))

	// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
	framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
	updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the pod template update.
	framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 2
	framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Make sure deployment %q is complete", deploymentName)
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensure that both old replica sets have no replicas")
	oldRS, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(oldRS, int32(0))
	// Not really the new replica set anymore but we GET by name so that's fine.
	newRS, err = c.Extensions().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(newRS, int32(0))
}
// ensureReplicas asserts that the replica set's desired (spec) and observed
// (status) replica counts both equal the expected value.
func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
	Expect(*rs.Spec.Replicas).To(Equal(replicas))
	Expect(rs.Status.Replicas).To(Equal(replicas))
}
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and
// then rollback to revision 1 (should update template to revision 1, and then update revision 1 to 3),
// and then rollback to last revision.
//
// Annotations on the deployment ("create" vs "update") are used to verify which
// replica set the controller treats as "new" after each rollout/rollback step.
func testRollbackDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}

	// 1. Create a deployment to create nginx pods.
	deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
	deploymentReplicas := int32(1)
	deploymentImage := NginxImage
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	// Tag revision 1's template with a "create" annotation so it can be
	// recognized again after the rollback in step 3.
	createAnnotation := map[string]string{"action": "create", "author": "node"}
	d.Annotations = createAnnotation
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	// Current newRS annotation should be "create"
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 2. Update the deployment to create redis pods.
	updatedDeploymentImage := RedisImage
	updatedDeploymentImageName := RedisImageName
	updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
	deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
		update.Annotations = updateAnnotation
	})
	Expect(err).NotTo(HaveOccurred())
	// Use observedGeneration to determine if the controller noticed the pod template update.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 2
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())
	// Current newRS annotation should be "update"
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 3. Update the deploymentRollback to rollback to revision 1
	revision := int64(1)
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackDone in deployment status and check it here

	// Rolling back to revision 1 re-applies that template as a new revision (3).
	// Wait for it to be updated to revision 3
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())
	// Current newRS annotation should be "create", after the rollback
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 4. Update the deploymentRollback to rollback to last revision
	// (revision 0 means "roll back to the previous revision").
	revision = 0
	framework.Logf("rolling back deployment %s to last revision", deploymentName)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 4
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())
	// Current newRS annotation should be "update", after the rollback
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
	Expect(err).NotTo(HaveOccurred())
}
// testRollbackDeploymentRSNoRevision tests that deployment supports rollback even when there's old replica set without revision.
// An old replica set without revision is created, and then a deployment is created (v1). The deployment shouldn't add revision
// annotation to the old replica set. Then rollback the deployment to last revision, and it should fail.
// Then update the deployment to v2 and rollback it to v1 should succeed, now the deployment
// becomes v3. Then rollback the deployment to v10 (doesn't exist in history) should fail.
// Finally, rollback the deployment (v3) to v3 should be no-op.
// TODO: When we finished reporting rollback status in deployment status, check the rollback status here in each case.
func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  NginxImageName,
	}

	// Create an old RS without revision. The unrelated "make" annotation lets
	// us confirm later that the deployment left the RS's annotations alone.
	rsName := "test-rollback-no-revision-controller"
	rsReplicas := int32(0)
	rs := newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)
	rs.Annotations = make(map[string]string)
	rs.Annotations["make"] = "difference"
	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())

	// 1. Create a deployment to create nginx pods, which have different template than the replica set created above.
	deploymentName, deploymentImageName := "test-rollback-no-revision-deployment", NginxImageName
	deploymentReplicas := int32(1)
	deploymentImage := NginxImage
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	// Check that the replica set we created still doesn't contain revision information
	rs, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))

	// 2. Update the deploymentRollback to rollback to last revision
	// Since there's only 1 revision in history, it should stay as revision 1
	revision := int64(0)
	framework.Logf("rolling back deployment %s to last revision", deploymentName)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackRevisionNotFound in deployment status and check it here

	// The pod template shouldn't change since there's no last revision
	// Check if the deployment is still revision 1 and still has the old pod template
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	// 3. Update the deployment to create redis pods.
	updatedDeploymentImage := RedisImage
	updatedDeploymentImageName := RedisImageName
	deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	})
	Expect(err).NotTo(HaveOccurred())
	// Use observedGeneration to determine if the controller noticed the pod template update.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 2
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// 4. Update the deploymentRollback to rollback to revision 1
	revision = 1
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackDone in deployment status and check it here

	// The pod template should be updated to the one in revision 1
	// Wait for it to be updated to revision 3
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// 5. Update the deploymentRollback to rollback to revision 10
	// Since there's no revision 10 in history, it should stay as revision 3
	revision = 10
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackRevisionNotFound in deployment status and check it here

	// The pod template shouldn't change since there's no revision 10
	// Check if it's still revision 3 and still has the old pod template
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)

	// 6. Update the deploymentRollback to rollback to revision 3
	// Since it's already revision 3, it should be no-op
	revision = 3
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackTemplateUnchanged in deployment status and check it here

	// The pod template shouldn't change since it's already revision 3
	// Check if it's still revision 3 and still has the old pod template
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)
}
// testDeploymentLabelAdopted verifies that a deployment adopts an existing
// replica set with a matching pod template: the adopted RS and its pods get
// relabeled with pod-template-hash, and no "old" replica sets remain.
func testDeploymentLabelAdopted(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	podName := "nginx"
	podLabels := map[string]string{"name": podName}

	rsName := "test-adopted-controller"
	replicas := int32(1)
	image := NginxImage
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, podName, false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Create a nginx deployment to adopt the old rs. Same labels, image, and
	// replica count as the RS above, so its template matches for adoption.
	deploymentName := "test-adopted-deployment"
	framework.Logf("Creating deployment %s", deploymentName)
	deploy, err := c.Extensions().Deployments(ns).Create(framework.NewDeployment(deploymentName, replicas, podLabels, podName, image, extensions.RollingUpdateDeploymentStrategyType))
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", image)
	Expect(err).NotTo(HaveOccurred())
	// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	// There should be no old RSs (overlapping RS)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(oldRSs)).Should(Equal(0))
	Expect(len(allOldRSs)).Should(Equal(0))
	// New RS should contain pod-template-hash in its selector, label, and template label
	err = framework.CheckRSHashLabel(newRS)
	Expect(err).NotTo(HaveOccurred())
	// All pods targeted by the deployment should contain pod-template-hash in
	// their labels, and the pod count should match the deployment's replicas.
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := metav1.ListOptions{LabelSelector: selector.String()}
	pods, err := c.Core().Pods(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	err = framework.CheckPodHashLabel(pods)
	Expect(err).NotTo(HaveOccurred())
	Expect(int32(len(pods.Items))).Should(Equal(replicas))
}
// testScaledRolloutDeployment exercises simultaneous scaling and template
// updates: it blocks rollouts with a non-existent image, then changes both
// replica count and image at once (scale up, then scale down), verifying that
// the replica sets stay synced and carry correct desiredReplicas annotations.
func testScaledRolloutDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(10)

	// Create a nginx deployment with explicit surge/unavailability bounds so
	// a blocked rollout leaves a predictable mix of old and new replicas.
	deploymentName := "nginx"
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
	d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
	d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)

	framework.Logf("Creating deployment %q", deploymentName)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	framework.Logf("Waiting for all required pods to come up")
	err = framework.VerifyPodsRunning(f.ClientSet, ns, NginxImageName, false, *(deployment.Spec.Replicas))
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	framework.Logf("Waiting for deployment %q to complete", deployment.Name)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	first, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())

	// Update the deployment with a non-existent image so that the new replica set will be blocked.
	framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
	})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Even while blocked, availability must not drop below the minimum implied
	// by maxUnavailable.
	if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
		Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred())
	}

	framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
	second, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())

	first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	firstCond := replicaSetHasDesiredReplicas(c.Extensions(), first)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
	Expect(err).NotTo(HaveOccurred())

	secondCond := replicaSetHasDesiredReplicas(c.Extensions(), second)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Updating the size (up) and template at the same time for deployment %q", deploymentName)
	newReplicas := int32(20)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Replicas = &newReplicas
		update.Spec.Template.Spec.Containers[0].Image = NautilusImage
	})
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())

	for _, rs := range append(oldRSs, rs) {
		framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)
		desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
		// NOTE(review): a replica set with a missing annotation (!ok) is
		// silently skipped here rather than treated as a failure — confirm
		// this is intended.
		if !ok || desired == *(deployment.Spec.Replicas) {
			continue
		}
		err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Update the deployment with a non-existent image so that the new replica set will be blocked.
	framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
	})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
		Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred())
	}

	framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
	oldRs, err := c.Extensions().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	newRs, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())

	oldCond := replicaSetHasDesiredReplicas(c.Extensions(), oldRs)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
	Expect(err).NotTo(HaveOccurred())

	newCond := replicaSetHasDesiredReplicas(c.Extensions(), newRs)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Updating the size (down) and template at the same time for deployment %q", deploymentName)
	newReplicas = int32(5)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Replicas = &newReplicas
		update.Spec.Template.Spec.Containers[0].Image = KittenImage
	})
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())

	for _, rs := range append(oldRSs, rs) {
		framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)
		desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
		if !ok || desired == *(deployment.Spec.Replicas) {
			continue
		}
		err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
		Expect(err).NotTo(HaveOccurred())
	}
}
// testOverlappingDeployment verifies that two deployments whose selectors
// overlap still proceed independently, each creating its own replica set.
func testOverlappingDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	// Create first deployment.
	deploymentName := "first-deployment"
	podLabels := map[string]string{"name": RedisImageName}
	replicas := int32(1)
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred(), "Failed creating the first deployment")

	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploy.Name, "1", RedisImage)
	Expect(err).NotTo(HaveOccurred(), "The first deployment failed to update to revision 1")

	// Create second deployment with overlapping selector. The second
	// deployment's labels are a superset of the first's, so the first
	// deployment's selector matches the second's pods too.
	deploymentName = "second-deployment"
	framework.Logf("Creating deployment %q with overlapping selector", deploymentName)
	podLabels["other-label"] = "random-label"
	d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	deployOverlapping, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred(), "Failed creating the second deployment")

	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deployOverlapping.Name, "1", NginxImage)
	Expect(err).NotTo(HaveOccurred(), "The second deployment failed to update to revision 1")

	// Both deployments should proceed independently.
	framework.Logf("Checking each deployment creates its own replica set")
	options := metav1.ListOptions{}
	rsList, err := c.Extensions().ReplicaSets(ns).List(options)
	Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns)
	Expect(rsList.Items).To(HaveLen(2))
}
// testFailedDeployment creates a deployment whose pod template references a
// non-existent image and a short progressDeadlineSeconds, waits for the
// Progressing condition to report a timeout, then repairs the image and
// verifies the deployment completes (NewRSAvailable).
func testFailedDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)

	// Create a nginx deployment with an image that can never be pulled, so the
	// rollout is guaranteed to exceed the 10s progress deadline.
	deploymentName := "progress-check"
	nonExistentImage := "nginx:not-there"
	ten := int32(10)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, nonExistentImage, extensions.RecreateDeploymentStrategyType)
	d.Spec.ProgressDeadlineSeconds = &ten

	framework.Logf("Creating deployment %q with progressDeadlineSeconds set to %ds and a non-existent image", deploymentName, ten)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment %q new replica set to come up", deploymentName)
	// BUG FIX: the original wrapped this call in Expect(...) without a matcher,
	// which asserts nothing; the returned error must be checked explicitly.
	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, replicas, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Checking deployment %q for a timeout condition", deploymentName)
	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.TimedOutReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())

	framework.Logf("Updating deployment %q with a good image", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = NginxImage
	})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment %q new replica set to come up", deploymentName)
	// BUG FIX: same missing-matcher problem as above.
	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, replicas, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment %q status", deploymentName)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
}
// randomScale randomly perturbs the deployment's replica count: with
// probability ~0.3 it scales up by one, with probability ~0.3 it scales down
// by one (never below one replica), and otherwise leaves it unchanged.
// i is the iteration number, used only for log prefixes.
func randomScale(d *extensions.Deployment, i int) {
	roll := rand.Float32()
	if roll < 0.3 {
		framework.Logf("%02d: scaling up", i)
		*(d.Spec.Replicas)++
		return
	}
	if roll < 0.6 && *(d.Spec.Replicas) > 1 {
		framework.Logf("%02d: scaling down", i)
		*(d.Spec.Replicas)--
	}
}
// testIterativeDeployments randomly mutates a deployment over many iterations
// (new rollouts, rollbacks, scaling, pause/resume toggling, arbitrary pod
// deletion) and then verifies the deployment still converges to a complete,
// valid status.
func testIterativeDeployments(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(6)
	zero := int64(0)
	two := int32(2)

	// Create a nginx deployment. Zero grace period speeds up pod churn; a
	// short revision history keeps old replica sets from piling up.
	deploymentName := "nginx"
	thirty := int32(30)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.ProgressDeadlineSeconds = &thirty
	d.Spec.RevisionHistoryLimit = &two
	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
	framework.Logf("Creating deployment %q", deploymentName)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	iterations := 20
	for i := 0; i < iterations; i++ {
		// Occasionally sleep so the controller has time to make progress
		// between mutations.
		if r := rand.Float32(); r < 0.6 {
			time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
		}

		switch n := rand.Float32(); {
		case n < 0.2:
			// trigger a new deployment
			framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
				newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
				update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
				randomScale(update, i)
			})
			Expect(err).NotTo(HaveOccurred())

		case n < 0.4:
			// rollback to the previous version
			framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
				rollbackTo := &extensions.RollbackConfig{Revision: 0}
				update.Spec.RollbackTo = rollbackTo
			})
			Expect(err).NotTo(HaveOccurred())

		case n < 0.6:
			// just scaling
			framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
				randomScale(update, i)
			})
			Expect(err).NotTo(HaveOccurred())

		case n < 0.8:
			// toggle the deployment's paused state
			// BUG FIX: the branches were inverted — an already-paused deployment
			// was "paused" again (Paused = true) and a running one was "resumed"
			// (Paused = false), so the deployment could never actually enter the
			// paused state and the unpause cleanup after the loop was dead code.
			if deployment.Spec.Paused {
				framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
				deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
					update.Spec.Paused = false
					randomScale(update, i)
				})
				Expect(err).NotTo(HaveOccurred())
			} else {
				framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
				deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
					update.Spec.Paused = true
					randomScale(update, i)
				})
				Expect(err).NotTo(HaveOccurred())
			}

		default:
			// arbitrarily delete deployment pods
			framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
			selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
			Expect(err).NotTo(HaveOccurred())
			opts := metav1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(ns).List(opts)
			Expect(err).NotTo(HaveOccurred())
			if len(podList.Items) == 0 {
				framework.Logf("%02d: no deployment pods to delete", i)
				continue
			}
			for p := range podList.Items {
				// Delete each pod with probability 0.5.
				if rand.Float32() < 0.5 {
					continue
				}
				name := podList.Items[p].Name
				framework.Logf("%02d: deleting deployment pod %q", i, name)
				err := c.Core().Pods(ns).Delete(name, nil)
				// A pod may already be gone (NotFound); that is not a failure.
				if err != nil && !errors.IsNotFound(err) {
					Expect(err).NotTo(HaveOccurred())
				}
			}
		}
	}

	// unpause the deployment if we end up pausing it
	deployment, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	if deployment.Spec.Paused {
		deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
			update.Spec.Paused = false
		})
		// BUG FIX: the error from the unpause update was previously ignored.
		Expect(err).NotTo(HaveOccurred())
	}

	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment %q status", deploymentName)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
}
// replicaSetHasDesiredReplicas returns a wait.ConditionFunc that re-fetches
// the replica set and reports true once the controller has observed at least
// the generation captured here and the observed replica count matches spec.
func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
	// Capture the generation now so later spec changes don't move the goalposts.
	generation := replicaSet.Generation
	return func() (bool, error) {
		current, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		observed := current.Status.ObservedGeneration >= generation
		settled := current.Status.Replicas == *(current.Spec.Replicas)
		return observed && settled, nil
	}
}
// testDeploymentsControllerRef verifies deployment ownership semantics: a
// deployment's replica sets carry its controllerRef; deleting the deployment
// with orphaning clears the refs; and a new deployment with the same selector
// re-adopts the orphaned replica sets.
func testDeploymentsControllerRef(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	deploymentName := "test-orphan-deployment"
	framework.Logf("Creating Deployment %q", deploymentName)
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Checking its ReplicaSet has the right controllerRef")
	err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Deleting Deployment %q and orphaning its ReplicaSets", deploymentName)
	err = orphanDeploymentReplicaSets(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	By("Wait for the ReplicaSet to be orphaned")
	err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
	Expect(err).NotTo(HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")

	// Second deployment uses the same pod labels, so it should adopt the
	// replica sets left behind by the first.
	deploymentName = "test-adopt-deployment"
	framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
	d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err = c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
	err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
	Expect(err).NotTo(HaveOccurred())
}
// waitDeploymentReplicaSetsControllerRef returns a wait.Poll-compatible
// condition function that reports true once every ReplicaSet in ns matching
// label is controlled by the owner with the given uid.
func waitDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		// A mismatch means "not adopted yet", not a hard failure, so swallow
		// the error and let the caller's poll loop retry.
		if err := checkDeploymentReplicaSetsControllerRef(c, ns, uid, label); err != nil {
			return false, nil
		}
		return true, nil
	}
}
// checkDeploymentReplicaSetsControllerRef verifies that every ReplicaSet in
// ns matching label is owned, via its controllerRef, by the controller with
// the given uid; it returns an error naming the first offender.
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
	rsList := listDeploymentReplicaSets(c, ns, label)
	for i := range rsList.Items {
		rs := &rsList.Items[i]
		// A ReplicaSet counts as adopted only once its controllerRef has been
		// updated to point at uid.
		controllerRef := metav1.GetControllerOf(rs)
		if controllerRef == nil || controllerRef.UID != uid {
			return fmt.Errorf("ReplicaSet %s has unexpected controllerRef %v", rs.Name, controllerRef)
		}
	}
	return nil
}
// waitDeploymentReplicaSetsOrphaned returns a wait.Poll-compatible condition
// function that reports true once no ReplicaSet in ns matching label carries
// a controllerRef.
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		rsList := listDeploymentReplicaSets(c, ns, label)
		for i := range rsList.Items {
			// Orphaning is complete only when every controllerRef is cleared.
			if metav1.GetControllerOf(&rsList.Items[i]) != nil {
				return false, nil
			}
		}
		return true, nil
	}
}
// listDeploymentReplicaSets lists every ReplicaSet in ns matching label.
// It fails the running spec if the list call errors or returns nothing, so
// callers always receive a non-empty list.
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
	opts := metav1.ListOptions{LabelSelector: labels.Set(label).AsSelector().String()}
	rsList, err := c.Extensions().ReplicaSets(ns).List(opts)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(rsList.Items)).To(BeNumerically(">", 0))
	return rsList
}
// orphanDeploymentReplicaSets deletes Deployment d without cascading, leaving
// its ReplicaSets behind. The delete is preconditioned on d's UID so that a
// recreated deployment with the same name cannot be deleted by accident.
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
	orphan := true
	deleteOptions := &metav1.DeleteOptions{
		OrphanDependents: &orphan,
		Preconditions:    metav1.NewUIDPreconditions(string(d.UID)),
	}
	return c.Extensions().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
}
Combine deployment rollback e2e tests
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
"fmt"
"math/rand"
"time"
"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
)
// Poll interval and overall timeout used by wait.Poll loops on
// deployment-related conditions in this suite.
const (
	dRetryPeriod  = 2 * time.Second
	dRetryTimeout = 5 * time.Minute
)
var (
	// nilRs is a typed nil *extensions.ReplicaSet, used with Gomega's Equal
	// to assert that a returned ReplicaSet pointer is non-nil.
	nilRs *extensions.ReplicaSet
)
// Deployment e2e suite registration. Each It delegates to one of the test*
// helpers below; failureTrap runs after every spec (including failures) to
// dump deployment/ReplicaSet/pod state for debugging.
var _ = SIGDescribe("Deployment", func() {
	var ns string
	var c clientset.Interface

	AfterEach(func() {
		failureTrap(c, ns)
	})

	f := framework.NewDefaultFramework("deployment")

	BeforeEach(func() {
		// Capture the per-spec client and namespace for use in AfterEach.
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	It("deployment reaping should cascade to its replica sets and pods", func() {
		testDeleteDeployment(f)
	})
	It("RollingUpdateDeployment should delete old pods and create new ones", func() {
		testRollingUpdateDeployment(f)
	})
	It("RecreateDeployment should delete old pods and create new ones", func() {
		testRecreateDeployment(f)
	})
	It("deployment should delete old replica sets", func() {
		testDeploymentCleanUpPolicy(f)
	})
	It("deployment should support rollover", func() {
		testRolloverDeployment(f)
	})
	It("deployment should support rollback", func() {
		testRollbackDeployment(f)
	})
	It("deployment should support rollback when there's replica set with no revision", func() {
		testRollbackDeploymentRSNoRevision(f)
	})
	It("deployment should label adopted RSs and pods", func() {
		testDeploymentLabelAdopted(f)
	})
	It("scaled rollout deployment should not block on annotation check", func() {
		testScaledRolloutDeployment(f)
	})
	It("overlapping deployment should not fight with each other", func() {
		testOverlappingDeployment(f)
	})
	It("lack of progress should be reported in the deployment status", func() {
		testFailedDeployment(f)
	})
	It("iterative rollouts should eventually progress", func() {
		testIterativeDeployments(f)
	})
	It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
		testDeploymentsControllerRef(f)
	})
	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
	// See https://github.com/kubernetes/kubernetes/issues/29229
})
// failureTrap logs every Deployment in ns together with its ReplicaSets and
// Pods. If no Deployments exist, it falls back to logging every ReplicaSet
// and that ReplicaSet's Pods. It is invoked from AfterEach so failed specs
// leave a useful trace; all errors are logged rather than failing the spec.
func failureTrap(c clientset.Interface, ns string) {
	deployments, err := c.Extensions().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	if err != nil {
		framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
		return
	}
	for i := range deployments.Items {
		d := deployments.Items[i]

		framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
		if err != nil {
			framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
			return
		}
		testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
		rsList := allOldRSs
		if newRS != nil {
			rsList = append(rsList, newRS)
		}
		testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
	}
	// We need print all the ReplicaSets if there are no Deployment object created
	if len(deployments.Items) != 0 {
		return
	}
	framework.Logf("Log out all the ReplicaSets if there is no deployment created")
	rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	if err != nil {
		framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
		return
	}
	for _, rs := range rss.Items {
		framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
		if err != nil {
			framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
			// Fix: skip this ReplicaSet instead of calling String() on a
			// selector we failed to build.
			continue
		}
		options := metav1.ListOptions{LabelSelector: selector.String()}
		podList, err := c.Core().Pods(rs.Namespace).List(options)
		if err != nil {
			// Fix: this error used to be silently ignored, and ranging over a
			// nil podList would panic.
			framework.Logf("Could not list Pods of ReplicaSet %s: %v", rs.Name, err)
			continue
		}
		for _, pod := range podList.Items {
			framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
		}
	}
}
// intOrStrP converts num to an IntOrString and returns a pointer to it, as
// required by pointer-typed fields such as RollingUpdateDeployment's
// MaxUnavailable and MaxSurge.
func intOrStrP(num int) *intstr.IntOrString {
	// Fix: the local was previously named "intstr", shadowing the intstr
	// package inside this function.
	v := intstr.FromInt(num)
	return &v
}
// newDeploymentRollback builds a DeploymentRollback targeting the deployment
// called name, rolling it back to the given revision and applying
// annotations as part of the rollback.
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
	rollback := extensions.DeploymentRollback{Name: name}
	rollback.UpdatedAnnotations = annotations
	rollback.RollbackTo = extensions.RollbackConfig{Revision: revision}
	return &rollback
}
// checkDeploymentRevision checks if the input deployment's and its new replica
// set's revision and images are as expected, failing the running spec on any
// mismatch. If imageName is empty the container name/image checks are skipped.
// It returns the fetched deployment and its new replica set for further use.
func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	// Check revision of the new replica set of this deployment
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS).NotTo(Equal(nilRs))
	Expect(newRS.Annotations).NotTo(Equal(nil))
	Expect(newRS.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	// Check revision of This deployment
	Expect(deployment.Annotations).NotTo(Equal(nil))
	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	if len(imageName) > 0 {
		// Check the image the new replica set creates
		Expect(newRS.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(newRS.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
		// Check the image the deployment creates
		Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
	}
	return deployment, newRS
}
// stopDeployment deletes the named deployment via the kubectl reaper and then
// asserts that the deployment, its ReplicaSets, and (eventually) its Pods are
// all gone, failing the running spec on any violation.
func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Deleting deployment %s", deploymentName)
	reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
	Expect(err).NotTo(HaveOccurred())
	timeout := 1 * time.Minute
	err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
	_, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
	// The Get must now fail with NotFound.
	Expect(err).To(HaveOccurred())
	Expect(errors.IsNotFound(err)).To(BeTrue())

	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := metav1.ListOptions{LabelSelector: selector.String()}
	rss, err := c.Extensions().ReplicaSets(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(rss.Items).Should(HaveLen(0))

	framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
	var pods *v1.PodList
	if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		pods, err = c.Core().Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		// Pods may be created by overlapping deployments right after this deployment is deleted, ignore them
		if len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	}); err != nil {
		framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
	}
}
// testDeleteDeployment creates a simple deployment, waits for it to reach a
// valid state at revision 1, verifies its new ReplicaSet exists, and then
// exercises cascading deletion via stopDeployment.
func testDeleteDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	internalClient := f.InternalClientset

	deploymentName := "test-new-deployment"
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)
	framework.Logf("Creating simple deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	// The annotation values document intent: the ordinary annotation should be
	// copied to the ReplicaSet, the last-applied-config one should not.
	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS).NotTo(Equal(nilRs))
	stopDeployment(c, internalClient, ns, deploymentName)
}
// testRollingUpdateDeployment creates a bare ReplicaSet carrying a revision
// annotation, then creates a deployment with a different pod template that
// adopts it. It checks the deployment takes the next revision number after
// the adopted ReplicaSet's, and that the old ReplicaSet gets the
// pod-template-hash label in its selector, labels, and template labels.
func testRollingUpdateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rsPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  NginxImageName,
	}

	rsName := "test-rolling-update-controller"
	replicas := int32(1)
	rsRevision := "3546343826724305832"
	annotations := make(map[string]string)
	annotations[deploymentutil.RevisionAnnotation] = rsRevision
	rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
	rs.Annotations = annotations
	framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %s", err)

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-rolling-update-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 3546343826724305833.
	// (The adopted ReplicaSet's revision plus one.)
	framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	// There should be 1 old RS (nginx-controller, which is adopted)
	framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(allOldRSs)).Should(Equal(1))
	// The old RS should contain pod-template-hash in its selector, label, and template label
	Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}
// testRecreateDeployment creates a deployment with the Recreate strategy,
// waits for it to complete, triggers a new rollout by changing the image,
// and then watches to verify that new pods never run alongside old pods.
func testRecreateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	// Create a deployment that brings up redis pods.
	deploymentName := "test-recreate-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting deployment %q to complete", deploymentName)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

	// Update deployment to delete redis pods and bring up nginx pods.
	framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = NginxImageName
		update.Spec.Template.Spec.Containers[0].Image = NginxImage
	})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
	Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred())
}
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy:
// with RevisionHistoryLimit set to 0, old ReplicaSets are removed after a
// rollout. A watch goroutine additionally asserts that exactly one pod
// creation (the redis pod) is observed during the rollout.
func testDeploymentCleanUpPolicy(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  NginxImageName,
	}
	rsName := "test-cleanup-controller"
	replicas := int32(1)
	revisionHistoryLimit := utilpointer.Int32Ptr(0)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-cleanup-deployment"
	framework.Logf("Creating deployment %s", deploymentName)

	pods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)

	// Watch from the pod list's resourceVersion so only events caused by the
	// deployment below are observed.
	options := metav1.ListOptions{
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	w, err := c.Core().Pods(ns).Watch(options)
	Expect(err).NotTo(HaveOccurred())
	go func() {
		// There should be only one pod being created, which is the pod with the redis image.
		// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
		numPodCreation := 1
		for {
			select {
			case event, ok := <-w.ResultChan():
				if !ok {
					// Fix: the result channel was read with its ok flag
					// discarded, so a closed watch made this loop spin on
					// zero-value events. Exit when the watch closes.
					return
				}
				if event.Type != watch.Added {
					continue
				}
				numPodCreation--
				if numPodCreation < 0 {
					framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
				}
				pod, ok := event.Object.(*v1.Pod)
				if !ok {
					framework.Failf("Expect event Object to be a pod")
				}
				if pod.Spec.Containers[0].Name != RedisImageName {
					framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
				}
			case <-stopCh:
				return
			}
		}
	}()
	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.RevisionHistoryLimit = revisionHistoryLimit
	_, err = c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
	err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
}
// testRolloverDeployment tests that deployment supports rollover.
// i.e. we can change desired state and kick off rolling update, then change
// desired state again before it finishes: an adopted ReplicaSet and a stuck
// new ReplicaSet (nonexistent image) are both rolled over to a third,
// working template, after which both must be scaled to zero.
func testRolloverDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "rollover-pod"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  NginxImageName,
	}

	rsName := "test-rollover-controller"
	rsReplicas := int32(1)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Wait for replica set to become ready before adopting it.
	framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
	Expect(framework.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())

	// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
	// We use a nonexistent image here, so that we make sure it won't finish
	deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
	deploymentReplicas := int32(1)
	deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %q", deploymentName)
	newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
		MaxUnavailable: intOrStrP(0),
		MaxSurge:       intOrStrP(1),
	}
	newDeployment.Spec.MinReadySeconds = int32(10)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the pods were scaled up and down as expected.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
	// Fix: this error was previously assigned but never checked.
	Expect(err).NotTo(HaveOccurred())
	// Check if it's updated to revision 1 correctly
	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
	_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	framework.Logf("Ensure that both replica sets have 1 created replica")
	oldRS, err := c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(oldRS, int32(1))
	newRS, err = c.Extensions().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(newRS, int32(1))

	// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
	framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
	updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the pod template update.
	framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 2
	framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Make sure deployment %q is complete", deploymentName)
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensure that both old replica sets have no replicas")
	oldRS, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(oldRS, int32(0))
	// Not really the new replica set anymore but we GET by name so that's fine.
	newRS, err = c.Extensions().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(newRS, int32(0))
}
// ensureReplicas asserts that both the desired (spec) and observed (status)
// replica counts of rs equal replicas.
func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
	Expect(*rs.Spec.Replicas).To(Equal(replicas))
	Expect(rs.Status.Replicas).To(Equal(replicas))
}
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and
// then rollback to revision 1 (should update template to revision 1, and then update revision 1 to 3),
// and then rollback to last revision (which is revision 4 that comes from revision 2).
// Then rollback the deployment to revision 10 (doesn't exist in history) should fail.
// Finally, rollback current deployment (revision 4) to revision 4 should be no-op.
// Deployment annotations ("create"/"update") are used to track which template
// the current new ReplicaSet corresponds to after each rollback.
func testRollbackDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}

	// 1. Create a deployment to create nginx pods.
	deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
	deploymentReplicas := int32(1)
	deploymentImage := NginxImage
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	createAnnotation := map[string]string{"action": "create", "author": "node"}
	d.Annotations = createAnnotation
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "create"
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 2. Update the deployment to create redis pods.
	updatedDeploymentImage := RedisImage
	updatedDeploymentImageName := RedisImageName
	updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
	deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
		update.Annotations = updateAnnotation
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the pod template update.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 2
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "update"
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 3. Update the deploymentRollback to rollback to revision 1
	revision := int64(1)
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackDone in deployment status and check it here

	// Wait for it to be updated to revision 3
	// (rolling back to revision 1 creates a new revision, 3, with its template)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "create", after the rollback
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 4. Update the deploymentRollback to rollback to last revision
	// (revision 0 means "roll back to the previous revision")
	revision = 0
	framework.Logf("rolling back deployment %s to last revision", deploymentName)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 4
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "update", after the rollback
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 5. Update the deploymentRollback to rollback to revision 10
	// Since there's no revision 10 in history, it should stay as revision 4
	revision = 10
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackRevisionNotFound in deployment status and check it here

	// The pod template shouldn't change since there's no revision 10
	// Check if it's still revision 4 and still has the old pod template
	checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)

	// 6. Update the deploymentRollback to rollback to revision 4
	// Since it's already revision 4, it should be no-op
	revision = 4
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackTemplateUnchanged in deployment status and check it here

	// The pod template shouldn't change since it's already revision 4
	// Check if it's still revision 4 and still has the old pod template
	checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
}
// testRollbackDeploymentRSNoRevision tests that deployment rollback works even when an
// old replica set without a revision annotation exists.
//
// Flow:
//  1. Create a bare replica set (no revision annotation), then a deployment (revision 1)
//     whose pod template differs from that replica set. The deployment must not stamp a
//     revision annotation onto the pre-existing replica set.
//  2. Roll back to the "last" revision (revision=0). With only one revision in history
//     there is nothing to go back to, so the deployment must stay at revision 1 with the
//     original pod template.
//  3. Update the deployment to redis pods (revision 2).
//  4. Roll back to revision 1; the pod template reverts and the deployment becomes
//     revision 3.
//
// TODO: When we finished reporting rollback status in deployment status, check the
// rollback status here in each case.
func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  NginxImageName,
	}
	// Create an old RS without revision. The extra "make" annotation proves later that
	// the deployment controller left the RS's annotations alone.
	rsName := "test-rollback-no-revision-controller"
	rsReplicas := int32(0)
	rs := newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)
	rs.Annotations = make(map[string]string)
	rs.Annotations["make"] = "difference"
	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())
	// 1. Create a deployment to create nginx pods, which have different template than the replica set created above.
	deploymentName, deploymentImageName := "test-rollback-no-revision-deployment", NginxImageName
	deploymentReplicas := int32(1)
	deploymentImage := NginxImage
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	// Check that the replica set we created still doesn't contain revision information
	rs, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
	// 2. Update the deploymentRollback to rollback to last revision
	// Since there's only 1 revision in history, it should stay as revision 1
	// (revision 0 means "roll back to the previous revision").
	revision := int64(0)
	framework.Logf("rolling back deployment %s to last revision", deploymentName)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackRevisionNotFound in deployment status and check it here
	// The pod template shouldn't change since there's no last revision
	// Check if the deployment is still revision 1 and still has the old pod template
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
	// 3. Update the deployment to create redis pods.
	updatedDeploymentImage := RedisImage
	updatedDeploymentImageName := RedisImageName
	deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	})
	Expect(err).NotTo(HaveOccurred())
	// Use observedGeneration to determine if the controller noticed the pod template update.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 2
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())
	// 4. Update the deploymentRollback to rollback to revision 1
	revision = 1
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackDone in deployment status and check it here
	// The pod template should be updated to the one in revision 1
	// Wait for it to be updated to revision 3
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatus(c, deployment)
	Expect(err).NotTo(HaveOccurred())
}
// testDeploymentLabelAdopted verifies that a deployment adopts a pre-existing replica
// set whose pod template matches its own: the RS and its pods get relabeled with the
// pod-template-hash, no "old" replica sets remain, and all pods targeted by the
// deployment carry the hash label.
func testDeploymentLabelAdopted(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	podName := "nginx"
	podLabels := map[string]string{"name": podName}
	rsName := "test-adopted-controller"
	replicas := int32(1)
	image := NginxImage
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, podName, false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
	// Create a nginx deployment to adopt the old rs.
	// Same labels/image/container name as the RS above, so the templates match.
	deploymentName := "test-adopted-deployment"
	framework.Logf("Creating deployment %s", deploymentName)
	deploy, err := c.Extensions().Deployments(ns).Create(framework.NewDeployment(deploymentName, replicas, podLabels, podName, image, extensions.RollingUpdateDeploymentStrategyType))
	Expect(err).NotTo(HaveOccurred())
	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", image)
	Expect(err).NotTo(HaveOccurred())
	// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	// There should be no old RSs (overlapping RS)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(oldRSs)).Should(Equal(0))
	Expect(len(allOldRSs)).Should(Equal(0))
	// New RS should contain pod-template-hash in its selector, label, and template label
	err = framework.CheckRSHashLabel(newRS)
	Expect(err).NotTo(HaveOccurred())
	// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := metav1.ListOptions{LabelSelector: selector.String()}
	pods, err := c.Core().Pods(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	err = framework.CheckPodHashLabel(pods)
	Expect(err).NotTo(HaveOccurred())
	// The adopted pods were never recreated, only relabeled, so the count is unchanged.
	Expect(int32(len(pods.Items))).Should(Equal(replicas))
}
// testScaledRolloutDeployment exercises proportional scaling during a rolling update:
// it blocks a rollout mid-flight with a non-existent image, then changes the replica
// count and template simultaneously (scale up to 20, later down to 5), verifying each
// time that every replica set — old and new — carries a desiredReplicas annotation
// matching the deployment's replica count (i.e. surplus replicas were distributed
// proportionally across replica sets).
func testScaledRolloutDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(10)
	// Create a nginx deployment.
	// maxSurge=3 / maxUnavailable=2 leave room for several RSes to hold replicas at once.
	deploymentName := "nginx"
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
	d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
	d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
	framework.Logf("Creating deployment %q", deploymentName)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	framework.Logf("Waiting for all required pods to come up")
	err = framework.VerifyPodsRunning(f.ClientSet, ns, NginxImageName, false, *(deployment.Spec.Replicas))
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
	framework.Logf("Waiting for deployment %q to complete", deployment.Name)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
	// Remember the current (first) new RS; after the next update it becomes an old RS.
	first, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	// Update the deployment with a non-existent image so that the new replica set will be blocked.
	framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
	})
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	// Availability must not drop below minAvailable even while the rollout is stuck.
	// The Expect(fmt.Errorf...) idiom fails the test with a descriptive error.
	if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
		Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred())
	}
	framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
	second, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	// Wait for both the old (first) and new (second) RS to reach their desired replica counts.
	first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	firstCond := replicaSetHasDesiredReplicas(c.Extensions(), first)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
	Expect(err).NotTo(HaveOccurred())
	secondCond := replicaSetHasDesiredReplicas(c.Extensions(), second)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Updating the size (up) and template at the same time for deployment %q", deploymentName)
	newReplicas := int32(20)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Replicas = &newReplicas
		update.Spec.Template.Spec.Containers[0].Image = NautilusImage
	})
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
	oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	// Every RS (old ones and the new one — note the loop variable shadows the outer rs)
	// must have its desiredReplicas annotation equal to the deployment's replica count.
	for _, rs := range append(oldRSs, rs) {
		framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)
		desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
		if !ok || desired == *(deployment.Spec.Replicas) {
			continue
		}
		err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
		Expect(err).NotTo(HaveOccurred())
	}
	// Update the deployment with a non-existent image so that the new replica set will be blocked.
	framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
	})
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for observed generation %d", deployment.Generation)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
		Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred())
	}
	framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
	oldRs, err := c.Extensions().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	newRs, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	oldCond := replicaSetHasDesiredReplicas(c.Extensions(), oldRs)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
	Expect(err).NotTo(HaveOccurred())
	newCond := replicaSetHasDesiredReplicas(c.Extensions(), newRs)
	err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Updating the size (down) and template at the same time for deployment %q", deploymentName)
	newReplicas = int32(5)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Replicas = &newReplicas
		update.Spec.Template.Spec.Containers[0].Image = KittenImage
	})
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
	oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	// Re-check the annotations after scaling down.
	for _, rs := range append(oldRSs, rs) {
		framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)
		desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
		if !ok || desired == *(deployment.Spec.Replicas) {
			continue
		}
		err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
		Expect(err).NotTo(HaveOccurred())
	}
}
// testOverlappingDeployment verifies that two deployments whose selectors overlap still
// operate independently: each one rolls out to revision 1 and creates its own replica
// set, so exactly two replica sets exist in the namespace afterwards.
func testOverlappingDeployment(f *framework.Framework) {
	c := f.ClientSet
	ns := f.Namespace.Name

	// Create first deployment.
	podLabels := map[string]string{"name": RedisImageName}
	replicas := int32(1)
	firstName := "first-deployment"
	framework.Logf("Creating deployment %q", firstName)
	spec := framework.NewDeployment(firstName, replicas, podLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	first, err := c.Extensions().Deployments(ns).Create(spec)
	Expect(err).NotTo(HaveOccurred(), "Failed creating the first deployment")

	// It must reach revision 1.
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, first.Name, "1", RedisImage)
	Expect(err).NotTo(HaveOccurred(), "The first deployment failed to update to revision 1")

	// Create second deployment whose selector is a superset of the first's.
	secondName := "second-deployment"
	framework.Logf("Creating deployment %q with overlapping selector", secondName)
	podLabels["other-label"] = "random-label"
	spec = framework.NewDeployment(secondName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	second, err := c.Extensions().Deployments(ns).Create(spec)
	Expect(err).NotTo(HaveOccurred(), "Failed creating the second deployment")

	// It must also reach revision 1 despite the overlap.
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, second.Name, "1", NginxImage)
	Expect(err).NotTo(HaveOccurred(), "The second deployment failed to update to revision 1")

	// Both deployments should proceed independently, each owning one replica set.
	framework.Logf("Checking each deployment creates its own replica set")
	allRSs, err := c.Extensions().ReplicaSets(ns).List(metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns)
	Expect(allRSs.Items).To(HaveLen(2))
}
// testFailedDeployment verifies progress-deadline reporting: a deployment whose pods
// can never start (non-existent image) must be marked with a timed-out Progressing
// condition, and fixing the image must eventually yield a complete, available rollout.
func testFailedDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)
	// Create a nginx deployment with a broken image so it can never make progress
	// within the 10s progress deadline.
	deploymentName := "progress-check"
	nonExistentImage := "nginx:not-there"
	ten := int32(10)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, nonExistentImage, extensions.RecreateDeploymentStrategyType)
	d.Spec.ProgressDeadlineSeconds = &ten
	framework.Logf("Creating deployment %q with progressDeadlineSeconds set to %ds and a non-existent image", deploymentName, ten)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for deployment %q new replica set to come up", deploymentName)
	// Fix: the original called Expect(...) with no matcher, which never asserts;
	// the error from the wait must be checked explicitly.
	Expect(framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, replicas, deployment.Generation)).NotTo(HaveOccurred())
	// The rollout must be reported as timed out.
	framework.Logf("Checking deployment %q for a timeout condition", deploymentName)
	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.TimedOutReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
	framework.Logf("Updating deployment %q with a good image", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = NginxImage
	})
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for deployment %q new replica set to come up", deploymentName)
	// Fix: same missing-matcher bug as above.
	Expect(framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, replicas, deployment.Generation)).NotTo(HaveOccurred())
	framework.Logf("Waiting for deployment %q status", deploymentName)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
	// After the fix the Progressing condition must flip to NewReplicaSetAvailable.
	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
}
// randomScale randomly perturbs the deployment's replica count: with probability 0.3
// it scales up by one, with probability 0.3 it scales down by one (never below one
// replica), and otherwise leaves the count unchanged. i is only used for log prefixes.
func randomScale(d *extensions.Deployment, i int) {
	r := rand.Float32()
	if r < 0.3 {
		framework.Logf("%02d: scaling up", i)
		*(d.Spec.Replicas)++
		return
	}
	if r < 0.6 && *(d.Spec.Replicas) > 1 {
		framework.Logf("%02d: scaling down", i)
		*(d.Spec.Replicas)--
	}
}
// testIterativeDeployments stress-tests the deployment controller by applying 20 random
// operations (new rollout, rollback, scale, pause/resume toggle, pod deletion) with
// random sleeps in between, then asserts the deployment still converges to a complete,
// valid state.
func testIterativeDeployments(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(6)
	zero := int64(0)
	two := int32(2)
	// Create a nginx deployment.
	deploymentName := "nginx"
	thirty := int32(30)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.ProgressDeadlineSeconds = &thirty
	d.Spec.RevisionHistoryLimit = &two
	// Zero grace period so deleted pods disappear quickly between iterations.
	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
	framework.Logf("Creating deployment %q", deploymentName)
	deployment, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	iterations := 20
	for i := 0; i < iterations; i++ {
		// Random sleep to let the controller make (or not make) progress between ops.
		if r := rand.Float32(); r < 0.6 {
			time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
		}
		switch n := rand.Float32(); {
		case n < 0.2:
			// trigger a new deployment
			framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
				newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
				update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
				randomScale(update, i)
			})
			Expect(err).NotTo(HaveOccurred())
		case n < 0.4:
			// rollback to the previous version (revision 0 means "last revision")
			framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
				rollbackTo := &extensions.RollbackConfig{Revision: 0}
				update.Spec.RollbackTo = rollbackTo
			})
			Expect(err).NotTo(HaveOccurred())
		case n < 0.6:
			// just scaling
			framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
				randomScale(update, i)
			})
			Expect(err).NotTo(HaveOccurred())
		case n < 0.8:
			// Toggle pause/resume.
			// Bug fix: the original branched the wrong way round — it set Paused=true
			// only when the deployment was already paused and Paused=false when it was
			// running, so the deployment was never actually paused.
			if deployment.Spec.Paused {
				framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
				deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
					update.Spec.Paused = false
					randomScale(update, i)
				})
				Expect(err).NotTo(HaveOccurred())
			} else {
				framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
				deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
					update.Spec.Paused = true
					randomScale(update, i)
				})
				Expect(err).NotTo(HaveOccurred())
			}
		default:
			// arbitrarily delete deployment pods
			framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
			selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
			Expect(err).NotTo(HaveOccurred())
			opts := metav1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.Core().Pods(ns).List(opts)
			Expect(err).NotTo(HaveOccurred())
			if len(podList.Items) == 0 {
				framework.Logf("%02d: no deployment pods to delete", i)
				continue
			}
			for p := range podList.Items {
				// Delete each pod with probability 0.5.
				if rand.Float32() < 0.5 {
					continue
				}
				name := podList.Items[p].Name
				framework.Logf("%02d: deleting deployment pod %q", i, name)
				err := c.Core().Pods(ns).Delete(name, nil)
				// NotFound is fine — the controller may have deleted it already.
				if err != nil && !errors.IsNotFound(err) {
					Expect(err).NotTo(HaveOccurred())
				}
			}
		}
	}
	// unpause the deployment if we end up pausing it
	deployment, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	if deployment.Spec.Paused {
		deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
			update.Spec.Paused = false
		})
		// Bug fix: the original dropped this error on the floor.
		Expect(err).NotTo(HaveOccurred())
	}
	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
	framework.Logf("Waiting for deployment %q status", deploymentName)
	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
}
// replicaSetHasDesiredReplicas returns a wait condition that reports whether the replica
// set's status has caught up with its spec: the controller has observed at least the
// generation captured at call time and the status replica count equals the desired count.
func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
	generation := replicaSet.Generation
	return func() (bool, error) {
		current, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		observed := current.Status.ObservedGeneration >= generation
		return observed && current.Status.Replicas == *(current.Spec.Replicas), nil
	}
}
// testDeploymentsControllerRef verifies controllerRef ownership handling: a deployment's
// replica sets carry its UID as controllerRef; deleting the deployment with orphaning
// clears the controllerRef; and a new deployment with a matching selector re-adopts the
// orphaned replica set.
func testDeploymentsControllerRef(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	deploymentName := "test-orphan-deployment"
	framework.Logf("Creating Deployment %q", deploymentName)
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Checking its ReplicaSet has the right controllerRef")
	err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
	Expect(err).NotTo(HaveOccurred())
	// Delete with OrphanDependents=true so the RS survives without an owner.
	framework.Logf("Deleting Deployment %q and orphaning its ReplicaSets", deploymentName)
	err = orphanDeploymentReplicaSets(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	By("Wait for the ReplicaSet to be orphaned")
	err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
	Expect(err).NotTo(HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
	// A new deployment with the same selector must adopt the orphaned RS.
	deploymentName = "test-adopt-deployment"
	framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
	d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err = c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForDeploymentStatusValid(c, deploy)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
	// checkDeploymentReplicaSetsControllerRef compares against the new deployment's UID.
	err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
	Expect(err).NotTo(HaveOccurred())
}
// waitDeploymentReplicaSetsControllerRef returns a poll condition that is satisfied once
// every replica set matching label in ns has a controllerRef pointing at uid. Check
// failures are treated as "not yet", never as terminal errors.
func waitDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		return checkDeploymentReplicaSetsControllerRef(c, ns, uid, label) == nil, nil
	}
}
// checkDeploymentReplicaSetsControllerRef returns an error unless every replica set
// matching label in ns is controlled by the deployment with the given uid.
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
	for _, rs := range listDeploymentReplicaSets(c, ns, label).Items {
		// The rs counts as adopted only once its controllerRef carries the owner's UID.
		controllerRef := metav1.GetControllerOf(&rs)
		if controllerRef == nil || controllerRef.UID != uid {
			return fmt.Errorf("ReplicaSet %s has unexpected controllerRef %v", rs.Name, controllerRef)
		}
	}
	return nil
}
// waitDeploymentReplicaSetsOrphaned returns a poll condition that is satisfied once no
// replica set matching label in ns has a controllerRef, i.e. all have been orphaned.
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		for _, rs := range listDeploymentReplicaSets(c, ns, label).Items {
			// Still owned by some controller; keep polling.
			if metav1.GetControllerOf(&rs) != nil {
				return false, nil
			}
		}
		return true, nil
	}
}
// listDeploymentReplicaSets lists the replica sets in ns matching label, failing the
// test on a list error or when no replica set matches.
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
	opts := metav1.ListOptions{LabelSelector: labels.Set(label).AsSelector().String()}
	rsList, err := c.Extensions().ReplicaSets(ns).List(opts)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(rsList.Items)).To(BeNumerically(">", 0))
	return rsList
}
// orphanDeploymentReplicaSets deletes deployment d while orphaning its dependents, with
// a UID precondition so a recreated deployment of the same name is not deleted by mistake.
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
	orphan := true
	opts := &metav1.DeleteOptions{
		OrphanDependents: &orphan,
		Preconditions:    metav1.NewUIDPreconditions(string(d.UID)),
	}
	return c.Extensions().Deployments(d.Namespace).Delete(d.Name, opts)
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package windows
import (
"fmt"
"sort"
"sync"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/apimachinery/pkg/util/uuid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Density e2e for Windows nodes: creates a batch of pods and checks pod start-up
// latency against configured limits. Tagged Serial/Slow because it measures timing
// and should not share the cluster with other tests.
var _ = SIGDescribe("Density [Serial] [Slow]", func() {
	f := framework.NewDefaultFramework("density-test-windows")
	Context("create a batch of pods", func() {
		// TODO(coufon): the values are generous, set more precise limits with benchmark data
		// and add more tests
		dTests := []densityTest{
			{
				podsNr:   10,
				interval: 0 * time.Millisecond,
				// percentile limit of single pod startup latency
				podStartupLimits: framework.LatencyMetric{
					Perc50: 30 * time.Second,
					Perc90: 54 * time.Second,
					Perc99: 59 * time.Second,
				},
				// upbound of startup latency of a batch of pods
				podBatchStartupLimit: 10 * time.Minute,
			},
		}
		for _, testArg := range dTests {
			// Copy the loop variable so the It closure binds this iteration's case.
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
			It(desc, func() {
				itArg.createMethod = "batch"
				runDensityBatchTest(f, itArg)
			})
		}
	})
})
// densityTest describes one density test case: how many pods to create, how to create
// them, and the performance limits the run must satisfy.
type densityTest struct {
	// number of pods
	podsNr int
	// number of background pods
	bgPodsNr int
	// interval between creating pod (rate control)
	interval time.Duration
	// create pods in 'batch' or 'sequence'
	createMethod string
	// API QPS limit
	APIQPSLimit int
	// performance limits
	cpuLimits            framework.ContainersCPUSummary
	memLimits            framework.ResourceUsagePerContainer
	podStartupLimits     framework.LatencyMetric
	podBatchStartupLimit time.Duration
}
// runDensityBatchTest runs the density batch pod creation test: it creates
// testArg.podsNr pods (one goroutine per pod, testArg.interval apart), waits until a
// pod informer has observed every pod reach Running, and returns the overall batch
// start-up lag together with the per-pod creation→running latencies, sorted ascending.
func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Duration, []framework.PodLatencyData) {
	const (
		podType = "density_test_pod"
	)
	var (
		mutex      = &sync.Mutex{}
		watchTimes = make(map[string]metav1.Time)
		stopCh     = make(chan struct{})
	)
	// create test pod data structure
	pods := newTestPods(testArg.podsNr, false, imageutils.GetPauseImageName(), podType)
	// the controller watches the change of pod status and records, under mutex, the
	// time each pod was first seen Running
	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
	go controller.Run(stopCh)
	defer close(stopCh)
	By("Creating a batch of pods")
	// It returns a map['pod name']'creation time' containing the creation timestamps
	createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
	By("Waiting for all Pods to be observed by the watch...")
	// watchTimes is written by the informer goroutine under mutex; every read must
	// also hold the mutex (the original read it unlocked — a data race under -race).
	watchedPods := func() int {
		mutex.Lock()
		defer mutex.Unlock()
		return len(watchTimes)
	}
	Eventually(func() bool {
		return watchedPods() == testArg.podsNr
	}, 10*time.Minute, 10*time.Second).Should(BeTrue())
	if watchedPods() < testArg.podsNr {
		framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
	}
	// Analyze results
	var (
		firstCreate metav1.Time
		lastRunning metav1.Time
		init        = true
		e2eLags     = make([]framework.PodLatencyData, 0)
	)
	// All pods have been observed so no further writes are expected, but hold the
	// lock while iterating to stay race-detector clean against the live informer.
	mutex.Lock()
	for name, create := range createTimes {
		watch, ok := watchTimes[name]
		Expect(ok).To(Equal(true))
		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
		// Track the earliest creation and the latest running time for the batch lag.
		if !init {
			if firstCreate.Time.After(create.Time) {
				firstCreate = create
			}
			if lastRunning.Time.Before(watch.Time) {
				lastRunning = watch
			}
		} else {
			init = false
			firstCreate, lastRunning = create, watch
		}
	}
	mutex.Unlock()
	sort.Sort(framework.LatencySlice(e2eLags))
	batchLag := lastRunning.Time.Sub(firstCreate.Time)
	deletePodsSync(f, pods)
	return batchLag, e2eLags
}
// createBatchPodWithRateControl creates the given pods concurrently (one goroutine per
// creation), sleeping interval between successive launches for throughput control, and
// returns a map from pod name to the timestamp at which its creation was initiated.
func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
	created := make(map[string]metav1.Time, len(pods))
	for _, p := range pods {
		created[p.ObjectMeta.Name] = metav1.Now()
		// The argument is evaluated here, so each goroutine gets its own pod.
		go f.PodClient().Create(p)
		time.Sleep(interval)
	}
	return created
}
// newInformerWatchPod creates an informer over pods labeled "type"=podType in the test
// namespace; whenever a pod is first seen in the Running phase, its name is recorded in
// watchTimes (guarded by mutex) with the observation timestamp. Callers read watchTimes
// under the same mutex.
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
	ns := f.Namespace.Name
	// checkPodRunning records only the FIRST time a pod is observed Running; later
	// updates for the same pod are ignored so the latency measurement is stable.
	checkPodRunning := func(p *v1.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer GinkgoRecover()
		if p.Status.Phase == v1.PodRunning {
			if _, found := watchTimes[p.Name]; !found {
				watchTimes[p.Name] = metav1.Now()
			}
		}
	}
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			// Both list and watch are restricted to this test's pods via the type label.
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				return f.ClientSet.CoreV1().Pods(ns).Watch(options)
			},
		},
		&v1.Pod{},
		0, // resync disabled; rely on watch events only
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*v1.Pod)
				Expect(ok).To(Equal(true))
				go checkPodRunning(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*v1.Pod)
				Expect(ok).To(Equal(true))
				go checkPodRunning(p)
			},
		},
	)
	return controller
}
// newTestPods creates a list of numPods pod specifications for test use. Each pod runs
// a single container with the given image, is labeled with "type"=podType plus its own
// unique name, and is pinned to Windows nodes. When volume is true, an EmptyDir volume
// is attached and mounted at /test-volume-mnt.
//
// The original duplicated the entire pod literal for the volume/no-volume cases; this
// builds one base spec and adds the volume conditionally.
func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
	pods := make([]*v1.Pod, 0, numPods)
	for i := 0; i < numPods; i++ {
		podName := "test-" + string(uuid.NewUUID())
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: podName,
				Labels: map[string]string{
					"type": podType,
					"name": podName,
				},
			},
			Spec: v1.PodSpec{
				// Restart policy is always (default).
				Containers: []v1.Container{
					{
						Image: imageName,
						Name:  podName,
					},
				},
				NodeSelector: map[string]string{
					"beta.kubernetes.io/os": "windows",
				},
			},
		}
		if volume {
			pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
				{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
			}
			pod.Spec.Volumes = []v1.Volume{
				{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
			}
		}
		pods = append(pods, pod)
	}
	return pods
}
// deletePodsSync deletes a list of pods concurrently and blocks until every
// pod has disappeared from the API server.
//
// Each deletion runs in its own goroutine with GinkgoRecover deferred so an
// assertion failure there is reported to Ginkgo instead of crashing the
// test process. Redundant trailing `return` removed (staticcheck S1023).
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer GinkgoRecover()
			defer wg.Done()
			// 30s grace period, then wait up to 10 minutes for the pod
			// object to be fully removed.
			err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
			Expect(err).NotTo(HaveOccurred())
			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
		}(pod)
	}
	wg.Wait()
}
// Commit note: "Adding Feature tag" — the file below is a revised copy of the Windows density e2e test.
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package windows
import (
"fmt"
"sort"
"sync"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/apimachinery/pkg/util/uuid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Density test for Windows nodes: creates a batch of pods and checks pod
// startup latency against the configured limits. Tagged Serial/Slow because
// it deliberately saturates the node with pod creations.
var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() {
	f := framework.NewDefaultFramework("density-test-windows")

	Context("create a batch of pods", func() {
		// TODO(coufon): the values are generous, set more precise limits with benchmark data
		// and add more tests
		dTests := []densityTest{
			{
				podsNr:   10,
				interval: 0 * time.Millisecond,
				// percentile limit of single pod startup latency
				podStartupLimits: framework.LatencyMetric{
					Perc50: 30 * time.Second,
					Perc90: 54 * time.Second,
					Perc99: 59 * time.Second,
				},
				// upbound of startup latency of a batch of pods
				podBatchStartupLimit: 10 * time.Minute,
			},
		}

		for _, testArg := range dTests {
			// Copy the loop variable so each It closure captures its own value.
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
			It(desc, func() {
				itArg.createMethod = "batch"
				runDensityBatchTest(f, itArg)
			})
		}
	})
})
// densityTest describes one density test case: how many pods to create, how
// to create them, and the latency/resource limits the run must stay within.
type densityTest struct {
	// number of pods
	podsNr int
	// number of background pods
	bgPodsNr int
	// interval between creating pod (rate control)
	interval time.Duration
	// create pods in 'batch' or 'sequence'
	createMethod string
	// API QPS limit
	APIQPSLimit int
	// performance limits
	cpuLimits            framework.ContainersCPUSummary
	memLimits            framework.ResourceUsagePerContainer
	podStartupLimits     framework.LatencyMetric
	podBatchStartupLimit time.Duration
}
// runDensityBatchTest runs the density batch pod creation test: it creates
// testArg.podsNr pods at the configured interval, waits for all of them to be
// observed Running by an informer, and returns the total batch startup
// latency together with the sorted per-pod e2e latencies.
//
// Fixes: drop the useless capacity argument in make(map, 0) (staticcheck
// S1019) and rename the local `watch` which shadowed the imported watch
// package.
func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Duration, []framework.PodLatencyData) {
	const (
		podType = "density_test_pod"
	)
	var (
		mutex      = &sync.Mutex{}
		watchTimes = make(map[string]metav1.Time)
		stopCh     = make(chan struct{})
	)

	// create test pod data structure
	pods := newTestPods(testArg.podsNr, false, imageutils.GetPauseImageName(), podType)

	// the controller watches the change of pod status
	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
	go controller.Run(stopCh)
	defer close(stopCh)

	By("Creating a batch of pods")
	// It returns a map['pod name']'creation time' containing the creation timestamps
	createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)

	By("Waiting for all Pods to be observed by the watch...")
	Eventually(func() bool {
		return len(watchTimes) == testArg.podsNr
	}, 10*time.Minute, 10*time.Second).Should(BeTrue())

	if len(watchTimes) < testArg.podsNr {
		framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
	}

	// Analyze results
	var (
		firstCreate metav1.Time
		lastRunning metav1.Time
		init        = true
		e2eLags     = make([]framework.PodLatencyData, 0)
	)

	for name, create := range createTimes {
		// watched is when the informer first saw the pod Running; renamed
		// from "watch" to avoid shadowing the imported watch package.
		watched, ok := watchTimes[name]
		Expect(ok).To(Equal(true))

		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: name, Latency: watched.Time.Sub(create.Time)})

		if !init {
			if firstCreate.Time.After(create.Time) {
				firstCreate = create
			}
			if lastRunning.Time.Before(watched.Time) {
				lastRunning = watched
			}
		} else {
			init = false
			firstCreate, lastRunning = create, watched
		}
	}

	sort.Sort(framework.LatencySlice(e2eLags))
	// Batch lag = last pod Running minus first pod creation request.
	batchLag := lastRunning.Time.Sub(firstCreate.Time)

	deletePodsSync(f, pods)

	return batchLag, e2eLags
}
// createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation.
// between creations there is an interval for throughput control
// It returns a map from pod name to the timestamp at which its creation was
// requested.
func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
	createTimes := make(map[string]metav1.Time)
	for _, pod := range pods {
		// Record the time before launching the goroutine so latency is
		// measured from the moment creation was requested.
		createTimes[pod.ObjectMeta.Name] = metav1.Now()
		go func(pod *v1.Pod) {
			// Fix: recover assertion failures raised by the framework's pod
			// client so a failed create is reported to Ginkgo instead of
			// crashing the process (same pattern as deletePodsSync).
			defer GinkgoRecover()
			f.PodClient().Create(pod)
		}(pod)
		time.Sleep(interval)
	}
	return createTimes
}
// newInformerWatchPod creates an informer to check whether all pods are running.
// It records, under mutex, the first time each pod labeled type=podType is
// observed in phase Running into watchTimes. The caller owns the mutex and
// the map, and must run the returned controller.
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
	ns := f.Namespace.Name
	// checkPodRunning records only the FIRST Running observation per pod.
	// GinkgoRecover is deferred because this runs on non-test goroutines.
	checkPodRunning := func(p *v1.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer GinkgoRecover()
		if p.Status.Phase == v1.PodRunning {
			if _, found := watchTimes[p.Name]; !found {
				watchTimes[p.Name] = metav1.Now()
			}
		}
	}
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			// List and watch are both restricted to pods carrying the
			// matching "type" label, so unrelated pods are ignored.
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				return f.ClientSet.CoreV1().Pods(ns).Watch(options)
			},
		},
		&v1.Pod{},
		0, // no resync
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*v1.Pod)
				Expect(ok).To(Equal(true))
				go checkPodRunning(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*v1.Pod)
				Expect(ok).To(Equal(true))
				go checkPodRunning(p)
			},
		},
	)
	return controller
}
// newTestPods creates a list of pods (specification) for test.
// Each pod gets a unique name, a "type" label (podType) used by the informer
// selector, and a Windows node selector. When volume is true, an EmptyDir
// volume is mounted into the container at /test-volume-mnt.
//
// Refactored: the original duplicated the entire pod spec across the
// volume/non-volume branches; build one base spec and add the volume bits
// conditionally.
func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
	pods := make([]*v1.Pod, 0, numPods)
	for i := 0; i < numPods; i++ {
		podName := "test-" + string(uuid.NewUUID())
		labels := map[string]string{
			"type": podType,
			"name": podName,
		}
		// Restart policy is always (default).
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:   podName,
				Labels: labels,
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Image: imageName,
						Name:  podName,
					},
				},
				NodeSelector: map[string]string{
					"beta.kubernetes.io/os": "windows",
				},
			},
		}
		if volume {
			// Attach an EmptyDir volume and mount it into the container.
			pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
				{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
			}
			pod.Spec.Volumes = []v1.Volume{
				{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
			}
		}
		pods = append(pods, pod)
	}
	return pods
}
// deletePodsSync deletes a list of pods concurrently and blocks until every
// pod has disappeared from the API server.
//
// Each deletion runs in its own goroutine with GinkgoRecover deferred so an
// assertion failure there is reported to Ginkgo instead of crashing the
// test process. Redundant trailing `return` removed (staticcheck S1023).
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer GinkgoRecover()
			defer wg.Done()
			// 30s grace period, then wait up to 10 minutes for the pod
			// object to be fully removed.
			err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
			Expect(err).NotTo(HaveOccurred())
			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
		}(pod)
	}
	wg.Wait()
}
// --- end of file; a new source file (Consul CA manager tests) begins below ---
package consul
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net/url"
"strings"
"testing"
"time"
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
"github.com/hashicorp/consul-net-rpc/net/rpc"
vaultapi "github.com/hashicorp/vault/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/connect"
ca "github.com/hashicorp/consul/agent/connect/ca"
"github.com/hashicorp/consul/agent/consul/fsm"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
)
// TODO(kyhavlov): replace with t.Deadline()
// CATestTimeout bounds how long the helpers below wait for an expected
// callback op or for CAManager initialization to finish.
const CATestTimeout = 7 * time.Second
// TestCAManager_Initialize_Vault_Secondary_SharedVault runs a primary and a
// secondary datacenter against the same Vault server (shared root PKI path,
// distinct intermediate paths) and verifies each DC can issue leaf
// certificates that chain up to the active root.
func TestCAManager_Initialize_Vault_Secondary_SharedVault(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)

	vault := ca.NewTestVaultServer(t)

	_, serverDC1 := testServerWithConfig(t, func(c *Config) {
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         "pki-root/",
				"IntermediatePKIPath": "pki-primary/",
			},
		}
	})

	runStep(t, "check primary DC", func(t *testing.T) {
		testrpc.WaitForTestAgent(t, serverDC1.RPC, "dc1")

		codec := rpcClient(t, serverDC1)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
	})

	runStep(t, "start secondary DC", func(t *testing.T) {
		// The secondary shares the Vault root PKI path but writes its own
		// intermediate under pki-secondary/.
		_, serverDC2 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc2"
			c.PrimaryDatacenter = "dc1"
			c.CAConfig = &structs.CAConfiguration{
				Provider: "vault",
				Config: map[string]interface{}{
					"Address":             vault.Addr,
					"Token":               vault.RootToken,
					"RootPKIPath":         "pki-root/",
					"IntermediatePKIPath": "pki-secondary/",
				},
			}
		})
		joinWAN(t, serverDC2, serverDC1)
		testrpc.WaitForActiveCARoot(t, serverDC2.RPC, "dc2", nil)

		codec := rpcClient(t, serverDC2)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
	})
}
// verifyLeafCert checks that leafCertPEM verifies against the single CA root
// given, by wrapping it in an IndexedCARoots and delegating to
// verifyLeafCertWithRoots.
func verifyLeafCert(t *testing.T, root *structs.CARoot, leafCertPEM string) {
	t.Helper()
	verifyLeafCertWithRoots(t, structs.IndexedCARoots{
		ActiveRootID: root.ID,
		Roots:        []*structs.CARoot{root},
	}, leafCertPEM)
}
// verifyLeafCertWithRoots verifies leafCertPEM twice against the given root
// set: once using the intermediates embedded in the leaf PEM itself, and once
// using the intermediates recorded on the CARoot entries. Both chains must
// verify for client authentication.
func verifyLeafCertWithRoots(t *testing.T, roots structs.IndexedCARoots, leafCertPEM string) {
	t.Helper()
	leaf, intermediates, err := connect.ParseLeafCerts(leafCertPEM)
	require.NoError(t, err)

	// Trust pool built from every root cert in the set.
	pool := x509.NewCertPool()
	for _, r := range roots.Roots {
		ok := pool.AppendCertsFromPEM([]byte(r.RootCert))
		if !ok {
			t.Fatalf("Failed to add root CA PEM to cert pool")
		}
	}

	// verify with intermediates from leaf CertPEM
	_, err = leaf.Verify(x509.VerifyOptions{
		Roots:         pool,
		Intermediates: intermediates,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err, "failed to verify using intermediates from leaf cert PEM")

	// verify with intermediates from the CARoot
	intermediates = x509.NewCertPool()
	for _, r := range roots.Roots {
		for _, intermediate := range r.IntermediateCerts {
			c, err := connect.ParseCert(intermediate)
			require.NoError(t, err)
			intermediates.AddCert(c)
		}
	}
	_, err = leaf.Verify(x509.VerifyOptions{
		Roots:         pool,
		Intermediates: intermediates,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err, "failed to verify using intermediates from CARoot list")
}
// mockCAServerDelegate implements the CAManager server delegate against an
// in-memory state store, reporting each significant operation on callbackCh
// so tests can assert ordering.
type mockCAServerDelegate struct {
	t      *testing.T
	config *Config
	store  *state.Store
	// primaryRoot is served for forwarded ConnectCA.Roots calls.
	primaryRoot *structs.CARoot
	// secondaryIntermediate is served for forwarded SignIntermediate calls.
	secondaryIntermediate string
	callbackCh            chan string
}
// NewMockCAServerDelegate returns a mockCAServerDelegate backed by a fresh
// in-memory state store seeded with the mock CA configuration at index 1.
// The callback channel is unbuffered so tests observe operations in order.
func NewMockCAServerDelegate(t *testing.T, config *Config) *mockCAServerDelegate {
	delegate := &mockCAServerDelegate{
		t:           t,
		config:      config,
		store:       state.NewStateStore(nil),
		primaryRoot: connect.TestCAWithTTL(t, nil, 1*time.Second),
		// Fix: make(chan string, 0) is equivalent but flagged by staticcheck (S1019).
		callbackCh: make(chan string),
	}
	delegate.store.CASetConfig(1, testCAConfig())
	return delegate
}
// State returns the delegate's in-memory state store.
func (m *mockCAServerDelegate) State() *state.Store {
	return m.store
}
// ProviderState looks up the CA provider state for the given ID in the store.
func (m *mockCAServerDelegate) ProviderState(id string) (*structs.CAConsulProviderState, error) {
	_, s, err := m.store.CAProviderState(id)
	return s, err
}
// IsLeader always reports true; the mock acts as the cluster leader.
func (m *mockCAServerDelegate) IsLeader() bool {
	return true
}
// ServersSupportMultiDCConnectCA always reports support in the mock.
func (m *mockCAServerDelegate) ServersSupportMultiDCConnectCA() error {
	return nil
}
// ApplyCALeafRequest returns a fixed raft index without applying anything.
func (m *mockCAServerDelegate) ApplyCALeafRequest() (uint64, error) {
	return 3, nil
}
// ApplyCARequest mirrors FSM.applyConnectCAOperation because that functionality
// is not exported.
// It applies the request directly to the backing state store at the next
// index, reporting the operation on callbackCh so tests can synchronize.
func (m *mockCAServerDelegate) ApplyCARequest(req *structs.CARequest) (interface{}, error) {
	idx, _, err := m.store.CAConfig(nil)
	if err != nil {
		return nil, err
	}
	// Fix: fmt.Sprintf with no formatting verbs is unnecessary (staticcheck S1039).
	m.callbackCh <- "raftApply/ConnectCA"
	result := fsm.ApplyConnectCAOperationFromRequest(m.store, req, idx+1)
	if err, ok := result.(error); ok && err != nil {
		return nil, err
	}
	return result, nil
}
// forwardDC fakes forwarding an RPC to another datacenter. Only the two
// methods CAManager uses are supported; any other method is an error. Each
// handled call is reported on callbackCh after its reply is populated.
func (m *mockCAServerDelegate) forwardDC(method, dc string, args interface{}, reply interface{}) error {
	switch method {
	case "ConnectCA.Roots":
		out := reply.(*structs.IndexedCARoots)
		out.TrustDomain = connect.TestClusterID
		out.Roots = []*structs.CARoot{m.primaryRoot}
		out.ActiveRootID = m.primaryRoot.ID
	case "ConnectCA.SignIntermediate":
		out := reply.(*string)
		*out = m.secondaryIntermediate
	default:
		return fmt.Errorf("received call to unsupported method %q", method)
	}
	m.callbackCh <- fmt.Sprintf("forwardDC/%s", method)
	return nil
}
// generateCASignRequest builds a CSR sign request addressed to the
// configured primary datacenter.
func (m *mockCAServerDelegate) generateCASignRequest(csr string) *structs.CASignRequest {
	req := &structs.CASignRequest{
		Datacenter: m.config.PrimaryDatacenter,
		CSR:        csr,
	}
	return req
}
// mockCAProvider mocks an empty provider implementation with a channel in order to coordinate
// waiting for certain methods to be called.
type mockCAProvider struct {
	callbackCh chan string
	// rootPEM is returned as the root; it doubles as the active intermediate
	// when intermediatePem is empty (see ActiveIntermediate).
	rootPEM         string
	intermediatePem string
}
// Configure is a no-op; the mock needs no provider configuration.
func (m *mockCAProvider) Configure(cfg ca.ProviderConfig) error { return nil }

// State reports no persistent provider state.
func (m *mockCAProvider) State() (map[string]string, error) { return nil, nil }

// GenerateRoot returns the configured root PEM.
func (m *mockCAProvider) GenerateRoot() (ca.RootResult, error) {
	return ca.RootResult{PEM: m.rootPEM}, nil
}

// GenerateIntermediateCSR signals callbackCh and returns an empty CSR.
func (m *mockCAProvider) GenerateIntermediateCSR() (string, error) {
	m.callbackCh <- "provider/GenerateIntermediateCSR"
	return "", nil
}

// SetIntermediate signals callbackCh and accepts any certificates.
func (m *mockCAProvider) SetIntermediate(intermediatePEM, rootPEM string) error {
	m.callbackCh <- "provider/SetIntermediate"
	return nil
}

// ActiveIntermediate returns intermediatePem, falling back to the root PEM
// when no intermediate was configured.
func (m *mockCAProvider) ActiveIntermediate() (string, error) {
	if m.intermediatePem == "" {
		return m.rootPEM, nil
	}
	return m.intermediatePem, nil
}

// The remaining provider methods are inert stubs.
func (m *mockCAProvider) GenerateIntermediate() (string, error)                     { return "", nil }
func (m *mockCAProvider) Sign(*x509.CertificateRequest) (string, error)             { return "", nil }
func (m *mockCAProvider) SignIntermediate(*x509.CertificateRequest) (string, error) { return "", nil }
func (m *mockCAProvider) CrossSignCA(*x509.Certificate) (string, error)             { return "", nil }
func (m *mockCAProvider) SupportsCrossSigning() (bool, error)                       { return false, nil }
func (m *mockCAProvider) Cleanup(_ bool, _ map[string]interface{}) error            { return nil }
// waitForCh reads one op from ch and fails the test unless it matches
// expected, or unless nothing arrives within CATestTimeout.
func waitForCh(t *testing.T, ch chan string, expected string) {
	t.Helper()
	select {
	case got := <-ch:
		if got != expected {
			t.Fatalf("got unexpected op %q, wanted %q", got, expected)
		}
	case <-time.After(CATestTimeout):
		t.Fatalf("never got op %q", expected)
	}
}
// waitForEmptyCh fails the test if any op arrives on ch within one second;
// it is used to assert that no further operations are in flight.
func waitForEmptyCh(t *testing.T, ch chan string) {
	// Fix: mark as a helper for caller-relative failure lines,
	// consistent with waitForCh.
	t.Helper()
	select {
	case op := <-ch:
		t.Fatalf("got unexpected op %q", op)
	case <-time.After(1 * time.Second):
	}
}
// testCAConfig returns the CA configuration used with the mock delegate: the
// test cluster ID, the "mock" provider, and the standard cert TTLs.
func testCAConfig() *structs.CAConfiguration {
	cfg := structs.CAConfiguration{
		ClusterID: connect.TestClusterID,
		Provider:  "mock",
		Config: map[string]interface{}{
			"LeafCertTTL":         "72h",
			"IntermediateCertTTL": "2160h",
		},
	}
	return &cfg
}
// initTestManager initializes a CAManager with a mockCAServerDelegate, consuming
// the ops that come through the channels and returning when initialization has finished.
//
// The loop drains exactly 5 callback ops, matching the operation sequence a
// secondary-DC Initialize emits (see TestCAManager_Initialize); if that
// sequence changes, this count must change with it.
func initTestManager(t *testing.T, manager *CAManager, delegate *mockCAServerDelegate) {
	t.Helper()
	initCh := make(chan struct{})
	go func() {
		// NOTE(review): require inside a goroutine calls FailNow off the
		// test goroutine, which testify documents as unsafe — confirm intended.
		require.NoError(t, manager.Initialize())
		close(initCh)
	}()
	for i := 0; i < 5; i++ {
		select {
		case <-delegate.callbackCh:
		case <-time.After(CATestTimeout):
			t.Fatal("failed waiting for initialization events")
		}
	}
	select {
	case <-initCh:
	case <-time.After(CATestTimeout):
		t.Fatal("failed waiting for initialization")
	}
}
// TestCAManager_Initialize drives CAManager.Initialize for a secondary DC
// against the mock delegate/provider and asserts both the state transitions
// (uninitialized -> initializing -> initialized) and the exact ordering of
// forwarded RPCs, provider calls, and the raft apply.
func TestCAManager_Initialize(t *testing.T) {
	conf := DefaultConfig()
	conf.ConnectEnabled = true
	conf.PrimaryDatacenter = "dc1"
	conf.Datacenter = "dc2"
	delegate := NewMockCAServerDelegate(t, conf)
	// Reuse the primary root PEM as the "signed intermediate" so it parses.
	delegate.secondaryIntermediate = delegate.primaryRoot.RootCert
	manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)

	manager.providerShim = &mockCAProvider{
		callbackCh: delegate.callbackCh,
		rootPEM:    delegate.primaryRoot.RootCert,
	}

	// Call Initialize and then confirm the RPCs and provider calls
	// happen in the expected order.
	require.Equal(t, caStateUninitialized, manager.state)
	errCh := make(chan error)
	go func() {
		err := manager.Initialize()
		assert.NoError(t, err)
		errCh <- err
	}()

	waitForCh(t, delegate.callbackCh, "forwardDC/ConnectCA.Roots")
	require.EqualValues(t, caStateInitializing, manager.state)
	waitForCh(t, delegate.callbackCh, "provider/GenerateIntermediateCSR")
	waitForCh(t, delegate.callbackCh, "forwardDC/ConnectCA.SignIntermediate")
	waitForCh(t, delegate.callbackCh, "provider/SetIntermediate")
	waitForCh(t, delegate.callbackCh, "raftApply/ConnectCA")
	// Nothing else should be in flight after the sequence above.
	waitForEmptyCh(t, delegate.callbackCh)

	// Make sure the Initialize call returned successfully.
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-time.After(CATestTimeout):
		t.Fatal("never got result from errCh")
	}

	require.Equal(t, caStateInitialized, manager.state)
}
// TestCAManager_UpdateConfigWhileRenewIntermediate verifies that a config
// update is rejected while an intermediate renewal is in flight, and that the
// renewal itself still completes successfully.
func TestCAManager_UpdateConfigWhileRenewIntermediate(t *testing.T) {
	// No parallel execution because we change globals
	patchIntermediateCertRenewInterval(t)
	conf := DefaultConfig()
	conf.ConnectEnabled = true
	conf.PrimaryDatacenter = "dc1"
	conf.Datacenter = "dc2"
	delegate := NewMockCAServerDelegate(t, conf)
	delegate.secondaryIntermediate = delegate.primaryRoot.RootCert
	manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)

	manager.providerShim = &mockCAProvider{
		callbackCh: delegate.callbackCh,
		rootPEM:    delegate.primaryRoot.RootCert,
	}
	initTestManager(t, manager, delegate)

	// Simulate Wait half the TTL for the cert to need renewing.
	manager.timeNow = func() time.Time {
		return time.Now().Add(500 * time.Millisecond)
	}

	// Call RenewIntermediate and then confirm the RPCs and provider calls
	// happen in the expected order.
	errCh := make(chan error)
	go func() {
		errCh <- manager.RenewIntermediate(context.TODO(), false)
	}()
	waitForCh(t, delegate.callbackCh, "provider/GenerateIntermediateCSR")

	// Call UpdateConfiguration while RenewIntermediate is still in-flight to
	// make sure we get an error about the state being occupied.
	go func() {
		require.EqualValues(t, caStateRenewIntermediate, manager.state)
		// BUG FIX: the original passed errors.New("already in state") as the
		// `err` argument of require.Error and the actual call result as a
		// message arg, so the assertion always passed without inspecting
		// UpdateConfiguration's return value. Assert on the real result.
		require.Error(t, manager.UpdateConfiguration(&structs.CARequest{}))
	}()
	waitForCh(t, delegate.callbackCh, "forwardDC/ConnectCA.SignIntermediate")
	waitForCh(t, delegate.callbackCh, "provider/SetIntermediate")
	waitForCh(t, delegate.callbackCh, "raftApply/ConnectCA")
	waitForEmptyCh(t, delegate.callbackCh)

	// Make sure the RenewIntermediate call returned successfully.
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-time.After(CATestTimeout):
		t.Fatal("never got result from errCh")
	}
	require.EqualValues(t, caStateInitialized, manager.state)
}
// TestCAManager_SignCertificate_WithExpiredCert verifies that SignCertificate
// refuses to sign when the root or intermediate certificate has expired, but
// tolerates certificates that are not yet valid.
func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// Each case sets the validity windows for the root and intermediate and
	// the expected signing outcome.
	args := []struct {
		testName              string
		notBeforeRoot         time.Time
		notAfterRoot          time.Time
		notBeforeIntermediate time.Time
		notAfterIntermediate  time.Time
		isError               bool
		errorMsg              string
	}{
		{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
		{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
		{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
		// a cert that is not yet valid is ok, assume it will be valid soon enough
		{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},
		{"root in the future", time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
	}

	// One key is shared by all generated certs; 4096-bit RSA keygen is slow,
	// so do it once outside the loop.
	caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
	require.NoError(t, err, "failed to generate key")

	for _, arg := range args {
		t.Run(arg.testName, func(t *testing.T) {
			// No parallel execution because we change globals
			// Set the interval and drift buffer low for renewing the cert.
			origInterval := structs.IntermediateCertRenewInterval
			origDriftBuffer := ca.CertificateTimeDriftBuffer
			defer func() {
				// Restore the globals after each subtest.
				structs.IntermediateCertRenewInterval = origInterval
				ca.CertificateTimeDriftBuffer = origDriftBuffer
			}()
			structs.IntermediateCertRenewInterval = time.Millisecond
			ca.CertificateTimeDriftBuffer = 0
			conf := DefaultConfig()
			conf.ConnectEnabled = true
			conf.PrimaryDatacenter = "dc1"
			conf.Datacenter = "dc2"

			rootPEM := generateCertPEM(t, caPrivKey, arg.notBeforeRoot, arg.notAfterRoot)
			intermediatePEM := generateCertPEM(t, caPrivKey, arg.notBeforeIntermediate, arg.notAfterIntermediate)

			delegate := NewMockCAServerDelegate(t, conf)
			delegate.primaryRoot.RootCert = rootPEM
			delegate.secondaryIntermediate = intermediatePEM
			manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)

			manager.providerShim = &mockCAProvider{
				callbackCh:      delegate.callbackCh,
				rootPEM:         rootPEM,
				intermediatePem: intermediatePEM,
			}
			initTestManager(t, manager, delegate)

			// Simulate Wait half the TTL for the cert to need renewing.
			manager.timeNow = func() time.Time {
				return time.Now().UTC().Add(500 * time.Millisecond)
			}

			// Call RenewIntermediate and then confirm the RPCs and provider calls
			// happen in the expected order.
			_, err := manager.SignCertificate(&x509.CertificateRequest{}, &connect.SpiffeIDAgent{})
			if arg.isError {
				require.Error(t, err)
				require.Contains(t, err.Error(), arg.errorMsg)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// generateCertPEM creates a self-signed CA certificate with the given
// validity window, signed with caPrivKey, and returns it PEM-encoded.
func generateCertPEM(t *testing.T, caPrivKey *rsa.PrivateKey, notBefore time.Time, notAfter time.Time) string {
	t.Helper()
	// Fix: the local was named "ca", shadowing the imported ca package.
	template := &x509.Certificate{
		SerialNumber: big.NewInt(2019),
		Subject: pkix.Name{
			Organization:  []string{"Company, INC."},
			Country:       []string{"US"},
			Province:      []string{""},
			Locality:      []string{"San Francisco"},
			StreetAddress: []string{"Golden Gate Bridge"},
			PostalCode:    []string{"94016"},
		},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		IsCA:                  true,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		URIs:                  []*url.URL{connect.SpiffeIDAgent{Host: "foo"}.URI()},
	}
	// Self-signed: the template serves as both subject and issuer.
	caBytes, err := x509.CreateCertificate(rand.Reader, template, template, &caPrivKey.PublicKey, caPrivKey)
	require.NoError(t, err, "failed to create cert")

	caPEM := new(bytes.Buffer)
	err = pem.Encode(caPEM, &pem.Block{
		Type:  "CERTIFICATE",
		Bytes: caBytes,
	})
	require.NoError(t, err, "failed to encode")
	return caPEM.String()
}
// TestCADelegateWithState_GenerateCASignRequest checks that a generated sign
// request is addressed to the server's primary datacenter.
func TestCADelegateWithState_GenerateCASignRequest(t *testing.T) {
	srv := Server{config: &Config{PrimaryDatacenter: "east"}, tokens: new(token.Store)}
	delegate := &caDelegateWithState{Server: &srv}
	require.Equal(t, "east", delegate.generateCASignRequest("A").RequestDatacenter())
}
// TestCAManager_Initialize_Logging starts a real server with a captured
// logger and asserts that configuring the built-in Consul CA provider emits
// its confirmation log line.
func TestCAManager_Initialize_Logging(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	t.Parallel()

	_, conf1 := testServerConfig(t)

	// Setup dummy logger to catch output
	var buf bytes.Buffer
	logger := testutil.LoggerWithOutput(t, &buf)

	deps := newDefaultDeps(t, conf1)
	deps.Logger = logger

	s1, err := NewServer(conf1, deps)
	require.NoError(t, err)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Wait til CA root is setup
	retry.Run(t, func(r *retry.R) {
		var out structs.IndexedCARoots
		r.Check(s1.RPC("ConnectCA.Roots", structs.DCSpecificRequest{
			Datacenter: conf1.Datacenter,
		}, &out))
	})

	require.Contains(t, buf.String(), "consul CA provider configured")
}
// TestCAManager_UpdateConfiguration_Vault_Primary verifies that pointing the
// Vault provider at new root/intermediate PKI paths rotates the active root
// and that the new leaf-signing cert matches the recorded SigningKeyID.
func TestCAManager_UpdateConfiguration_Vault_Primary(t *testing.T) {
	// Consistency fix: every other Vault-backed test in this file skips under
	// -short; this one previously did not.
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)

	vault := ca.NewTestVaultServer(t)

	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         "pki-root/",
				"IntermediatePKIPath": "pki-intermediate/",
			},
		}
	})
	defer func() {
		s1.Shutdown()
		s1.leaderRoutineManager.Wait()
	}()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	_, origRoot, err := s1.fsm.State().CARootActive(nil)
	require.NoError(t, err)
	require.Len(t, origRoot.IntermediateCerts, 1)

	cert, err := connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(origRoot))
	require.NoError(t, err)
	require.Equal(t, connect.HexString(cert.SubjectKeyId), origRoot.SigningKeyID)

	// Point the provider at fresh PKI paths; this must rotate the root.
	err = s1.caManager.UpdateConfiguration(&structs.CARequest{
		Config: &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         "pki-root-2/",
				"IntermediatePKIPath": "pki-intermediate-2/",
			},
		},
	})
	require.NoError(t, err)

	_, newRoot, err := s1.fsm.State().CARootActive(nil)
	require.NoError(t, err)
	require.Len(t, newRoot.IntermediateCerts, 2,
		"expected one cross-sign cert and one local leaf sign cert")
	require.NotEqual(t, origRoot.ID, newRoot.ID)

	cert, err = connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(newRoot))
	require.NoError(t, err)
	require.Equal(t, connect.HexString(cert.SubjectKeyId), newRoot.SigningKeyID)
}
// TestCAManager_Initialize_Vault_WithIntermediateAsPrimaryCA exercises the
// case where the mesh "root" in Vault is itself subordinate to an external
// root CA, then brings up a secondary DC against that primary.
func TestCAManager_Initialize_Vault_WithIntermediateAsPrimaryCA(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)

	vault := ca.NewTestVaultServer(t)
	vclient := vault.Client()
	// Create an external root in Vault; the mesh root is set up beneath it.
	generateExternalRootCA(t, vclient)

	meshRootPath := "pki-root"
	primaryCert := setupPrimaryCA(t, vclient, meshRootPath, "")

	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         meshRootPath,
				"IntermediatePKIPath": "pki-intermediate/",
			},
		}
	})

	runStep(t, "check primary DC", func(t *testing.T) {
		testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

		codec := rpcClient(t, s1)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		require.Equal(t, primaryCert, roots.Roots[0].RootCert)

		leafCertPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Roots[0], leafCertPEM)
	})

	// TODO: renew primary leaf signing cert
	// TODO: rotate root

	runStep(t, "run secondary DC", func(t *testing.T) {
		// The secondary reuses the same mesh root path but gets its own
		// intermediate under pki-secondary/.
		_, sDC2 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc2"
			c.PrimaryDatacenter = "dc1"
			c.CAConfig = &structs.CAConfiguration{
				Provider: "vault",
				Config: map[string]interface{}{
					"Address":             vault.Addr,
					"Token":               vault.RootToken,
					"RootPKIPath":         meshRootPath,
					"IntermediatePKIPath": "pki-secondary/",
				},
			}
		})
		defer sDC2.Shutdown()
		joinWAN(t, sDC2, s1)
		testrpc.WaitForActiveCARoot(t, sDC2.RPC, "dc2", nil)

		codec := rpcClient(t, sDC2)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)

		leafCertPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
		verifyLeafCert(t, roots.Roots[0], leafCertPEM)

		// TODO: renew secondary leaf signing cert
	})
}
// getLeafCert generates a private key and CSR for a test service in the
// given trust domain and datacenter, submits it via ConnectCA.Sign, and
// returns the issued certificate PEM.
func getLeafCert(t *testing.T, codec rpc.ClientCodec, trustDomain string, dc string) string {
	pk, _, err := connect.GeneratePrivateKey()
	require.NoError(t, err)

	spiffeID := &connect.SpiffeIDService{
		Host:       trustDomain,
		Service:    "srv1",
		Datacenter: dc,
	}
	csr, err := connect.CreateCSR(spiffeID, pk, nil, nil)
	require.NoError(t, err)

	issued := structs.IssuedCert{}
	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &structs.CASignRequest{CSR: csr}, &issued)
	require.NoError(t, err)

	return issued.CertPEM
}
func TestCAManager_Initialize_Vault_WithExternalTrustedCA(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ca.SkipIfVaultNotPresent(t)
vault := ca.NewTestVaultServer(t)
vclient := vault.Client()
rootPEM := generateExternalRootCA(t, vclient)
primaryCAPath := "pki-primary"
primaryCert := setupPrimaryCA(t, vclient, primaryCAPath, rootPEM)
_, serverDC1 := testServerWithConfig(t, func(c *Config) {
c.CAConfig = &structs.CAConfiguration{
Provider: "vault",
Config: map[string]interface{}{
"Address": vault.Addr,
"Token": vault.RootToken,
"RootPKIPath": primaryCAPath,
"IntermediatePKIPath": "pki-intermediate/",
},
}
})
testrpc.WaitForTestAgent(t, serverDC1.RPC, "dc1")
var origLeaf string
roots := structs.IndexedCARoots{}
runStep(t, "verify primary DC", func(t *testing.T) {
codec := rpcClient(t, serverDC1)
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(t, err)
require.Len(t, roots.Roots, 1)
require.Equal(t, primaryCert, roots.Roots[0].RootCert)
require.Contains(t, roots.Roots[0].RootCert, rootPEM)
leafCert := getLeafCert(t, codec, roots.TrustDomain, "dc1")
verifyLeafCert(t, roots.Active(), leafCert)
origLeaf = leafCert
})
_, serverDC2 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc1"
c.CAConfig = &structs.CAConfiguration{
Provider: "vault",
Config: map[string]interface{}{
"Address": vault.Addr,
"Token": vault.RootToken,
"RootPKIPath": "should-be-ignored",
"IntermediatePKIPath": "pki-secondary/",
},
}
})
runStep(t, "start secondary DC", func(t *testing.T) {
joinWAN(t, serverDC2, serverDC1)
testrpc.WaitForActiveCARoot(t, serverDC2.RPC, "dc2", nil)
codec := rpcClient(t, serverDC2)
roots = structs.IndexedCARoots{}
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(t, err)
require.Len(t, roots.Roots, 1)
leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
verifyLeafCert(t, roots.Roots[0], leafPEM)
})
runStep(t, "renew leaf signing CA in primary", func(t *testing.T) {
previous := serverDC1.caManager.getLeafSigningCertFromRoot(roots.Active())
renewLeafSigningCert(t, serverDC1.caManager, serverDC1.caManager.primaryRenewIntermediate)
codec := rpcClient(t, serverDC1)
roots = structs.IndexedCARoots{}
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(t, err)
require.Len(t, roots.Roots, 1)
require.Len(t, roots.Roots[0].IntermediateCerts, 2)
newCert := serverDC1.caManager.getLeafSigningCertFromRoot(roots.Active())
require.NotEqual(t, previous, newCert)
leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
verifyLeafCert(t, roots.Roots[0], leafPEM)
// original certs from old signing cert should still verify
verifyLeafCert(t, roots.Roots[0], origLeaf)
})
runStep(t, "renew leaf signing CA in secondary", func(t *testing.T) {
previous := serverDC2.caManager.getLeafSigningCertFromRoot(roots.Active())
renewLeafSigningCert(t, serverDC2.caManager, serverDC2.caManager.secondaryRequestNewSigningCert)
codec := rpcClient(t, serverDC2)
roots = structs.IndexedCARoots{}
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(t, err)
require.Len(t, roots.Roots, 1)
// one intermediate from primary, two from secondary
require.Len(t, roots.Roots[0].IntermediateCerts, 3)
newCert := serverDC1.caManager.getLeafSigningCertFromRoot(roots.Active())
require.NotEqual(t, previous, newCert)
leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
verifyLeafCert(t, roots.Roots[0], leafPEM)
// original certs from old signing cert should still verify
verifyLeafCert(t, roots.Roots[0], origLeaf)
})
runStep(t, "rotate root by changing the provider", func(t *testing.T) {
codec := rpcClient(t, serverDC1)
req := &structs.CARequest{
Op: structs.CAOpSetConfig,
Config: &structs.CAConfiguration{
Provider: "consul",
},
}
var resp error
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", req, &resp)
require.NoError(t, err)
require.Nil(t, resp)
roots = structs.IndexedCARoots{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(t, err)
require.Len(t, roots.Roots, 2)
active := roots.Active()
require.Len(t, active.IntermediateCerts, 1)
leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
verifyLeafCert(t, roots.Active(), leafPEM)
// original certs from old root cert should still verify
verifyLeafCertWithRoots(t, roots, origLeaf)
})
runStep(t, "rotate to a different external root", func(t *testing.T) {
setupPrimaryCA(t, vclient, "pki-primary-2/", rootPEM)
codec := rpcClient(t, serverDC1)
req := &structs.CARequest{
Op: structs.CAOpSetConfig,
Config: &structs.CAConfiguration{
Provider: "vault",
Config: map[string]interface{}{
"Address": vault.Addr,
"Token": vault.RootToken,
"RootPKIPath": "pki-primary-2/",
"IntermediatePKIPath": "pki-intermediate-2/",
},
},
}
var resp error
err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", req, &resp)
require.NoError(t, err)
require.Nil(t, resp)
roots = structs.IndexedCARoots{}
err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
require.NoError(t, err)
require.Len(t, roots.Roots, 3)
active := roots.Active()
require.Len(t, active.IntermediateCerts, 2)
leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
verifyLeafCert(t, roots.Active(), leafPEM)
// original certs from old root cert should still verify
verifyLeafCertWithRoots(t, roots, origLeaf)
})
}
// renewLeafSigningCert synchronously rotates the leaf signing certificate by
// invoking fn (one of the CAManager renewal helpers) on a clone of the
// currently active root, then persisting the result and re-installing the
// provider/root pair.
//
// It mimics RenewIntermediate, which is unfortunate but necessary for now:
// there is no easy way to invoke that logic unconditionally. Triggering it
// for real requires patching values and polling for the operation to
// complete, which adds a lot of distraction to a test case.
func renewLeafSigningCert(t *testing.T, manager *CAManager, fn func(ca.Provider, *structs.CARoot) error) {
	t.Helper()

	provider, _ := manager.getCAProvider()

	// Clone the active root so the store's copy is not mutated in place.
	_, active, err := manager.delegate.State().CARootActive(nil)
	require.NoError(t, err)
	renewed := active.Clone()

	require.NoError(t, fn(provider, renewed))
	require.NoError(t, manager.persistNewRootAndConfig(provider, renewed, nil))
	manager.setCAProvider(provider, renewed)
}
// generateExternalRootCA mounts a pki secrets engine at "corp" and generates
// a root certificate inside it, standing in for an externally managed (e.g.
// corporate) CA. It returns the root certificate PEM, newline-terminated.
func generateExternalRootCA(t *testing.T, client *vaultapi.Client) string {
	t.Helper()

	mount := &vaultapi.MountInput{
		Type:        "pki",
		Description: "External root, probably corporate CA",
		Config: vaultapi.MountConfigInput{
			MaxLeaseTTL:     "2400h",
			DefaultLeaseTTL: "1h",
		},
	}
	require.NoError(t, client.Sys().Mount("corp", mount), "failed to mount")

	secret, err := client.Logical().Write("corp/root/generate/internal", map[string]interface{}{
		"common_name": "corporate CA",
		"ttl":         "2400h",
	})
	require.NoError(t, err, "failed to generate root")

	return ca.EnsureTrailingNewline(secret.Data["certificate"].(string))
}
// setupPrimaryCA mounts a pki engine at path, generates an intermediate CSR
// there, has the external "corp" root (see generateExternalRootCA) sign it,
// and installs the signed bundle back into the mount. It returns the PEM
// bundle (signed intermediate followed by rootPEM), newline-terminated.
func setupPrimaryCA(t *testing.T, client *vaultapi.Client, path string, rootPEM string) string {
	t.Helper()

	mountErr := client.Sys().Mount(path, &vaultapi.MountInput{
		Type:        "pki",
		Description: "primary CA for Consul CA",
		Config: vaultapi.MountConfigInput{
			MaxLeaseTTL:     "2200h",
			DefaultLeaseTTL: "1h",
		},
	})
	require.NoError(t, mountErr, "failed to mount")

	csrResp, err := client.Logical().Write(path+"/intermediate/generate/internal", map[string]interface{}{
		"common_name": "primary CA",
		"ttl":         "2200h",
		"key_type":    "ec",
		"key_bits":    256,
	})
	require.NoError(t, err, "failed to generate root")

	signed, err := client.Logical().Write("corp/root/sign-intermediate", map[string]interface{}{
		"csr":            csrResp.Data["csr"],
		"use_csr_values": true,
		"format":         "pem_bundle",
		"ttl":            "2200h",
	})
	require.NoError(t, err, "failed to sign intermediate")

	// Bundle order: signed intermediate first, then the external root.
	bundle := ca.EnsureTrailingNewline(signed.Data["certificate"].(string)) +
		ca.EnsureTrailingNewline(rootPEM)

	_, err = client.Logical().Write(path+"/intermediate/set-signed", map[string]interface{}{
		"certificate": bundle,
	})
	require.NoError(t, err, "failed to set signed intermediate")

	return ca.EnsureTrailingNewline(bundle)
}
ca: test that original certs from secondary still verify
There's a chance this could flake if the secondary hasn't received the
update yet, but repeated runs of this test have not shown any flakes so
far.
package consul
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net/url"
"strings"
"testing"
"time"
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
"github.com/hashicorp/consul-net-rpc/net/rpc"
vaultapi "github.com/hashicorp/vault/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/connect"
ca "github.com/hashicorp/consul/agent/connect/ca"
"github.com/hashicorp/consul/agent/consul/fsm"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
)
// TODO(kyhavlov): replace with t.Deadline()
// CATestTimeout bounds how long tests in this file wait for asynchronous CA
// operations (callback events, initialization results) before failing.
const CATestTimeout = 7 * time.Second
// TestCAManager_Initialize_Vault_Secondary_SharedVault runs a primary and a
// secondary DC against the same Vault server — both configured with the same
// RootPKIPath but distinct IntermediatePKIPaths — and verifies that each DC
// serves roots and signs leaf certificates that verify against them.
func TestCAManager_Initialize_Vault_Secondary_SharedVault(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)
	vault := ca.NewTestVaultServer(t)
	_, serverDC1 := testServerWithConfig(t, func(c *Config) {
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         "pki-root/",
				"IntermediatePKIPath": "pki-primary/",
			},
		}
	})
	runStep(t, "check primary DC", func(t *testing.T) {
		testrpc.WaitForTestAgent(t, serverDC1.RPC, "dc1")
		codec := rpcClient(t, serverDC1)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		// A freshly signed leaf must chain up to the served root.
		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
	})
	runStep(t, "start secondary DC", func(t *testing.T) {
		// Same RootPKIPath as the primary; only the intermediate path differs.
		_, serverDC2 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc2"
			c.PrimaryDatacenter = "dc1"
			c.CAConfig = &structs.CAConfiguration{
				Provider: "vault",
				Config: map[string]interface{}{
					"Address":             vault.Addr,
					"Token":               vault.RootToken,
					"RootPKIPath":         "pki-root/",
					"IntermediatePKIPath": "pki-secondary/",
				},
			}
		})
		joinWAN(t, serverDC2, serverDC1)
		testrpc.WaitForActiveCARoot(t, serverDC2.RPC, "dc2", nil)
		codec := rpcClient(t, serverDC2)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		// Leaves signed in the secondary must verify against the shared root.
		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
	})
}
// verifyLeafCert checks that leafCertPEM chains up to root, treating root as
// the single (active) entry in the trust store.
func verifyLeafCert(t *testing.T, root *structs.CARoot, leafCertPEM string) {
	t.Helper()
	verifyLeafCertWithRoots(t, structs.IndexedCARoots{
		ActiveRootID: root.ID,
		Roots:        []*structs.CARoot{root},
	}, leafCertPEM)
}
// verifyLeafCertWithRoots checks that leafCertPEM chains up to one of the
// given roots. It verifies twice: first with the intermediates bundled in the
// leaf PEM itself, then with the intermediates recorded on the CARoot entries.
func verifyLeafCertWithRoots(t *testing.T, roots structs.IndexedCARoots, leafCertPEM string) {
	t.Helper()
	leaf, leafIntermediates, err := connect.ParseLeafCerts(leafCertPEM)
	require.NoError(t, err)

	rootPool := x509.NewCertPool()
	for _, r := range roots.Roots {
		if !rootPool.AppendCertsFromPEM([]byte(r.RootCert)) {
			t.Fatalf("Failed to add root CA PEM to cert pool")
		}
	}

	verify := func(intermediates *x509.CertPool, desc string) {
		_, err := leaf.Verify(x509.VerifyOptions{
			Roots:         rootPool,
			Intermediates: intermediates,
			KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		})
		require.NoError(t, err, desc)
	}

	// Pass 1: intermediates that came bundled with the leaf PEM.
	verify(leafIntermediates, "failed to verify using intermediates from leaf cert PEM")

	// Pass 2: intermediates recorded on the CARoot list.
	fromRoots := x509.NewCertPool()
	for _, r := range roots.Roots {
		for _, pemCert := range r.IntermediateCerts {
			c, err := connect.ParseCert(pemCert)
			require.NoError(t, err)
			fromRoots.AddCert(c)
		}
	}
	verify(fromRoots, "failed to verify using intermediates from CARoot list")
}
// mockCAServerDelegate is a test double for the CAManager's server delegate,
// backed by an in-memory state store. Operations that would normally apply
// through raft or forward to another DC instead emit a description of the
// call on callbackCh so tests can assert on the order of operations.
type mockCAServerDelegate struct {
	t      *testing.T
	config *Config
	store  *state.Store
	// primaryRoot is returned by forwardDC("ConnectCA.Roots").
	primaryRoot *structs.CARoot
	// secondaryIntermediate is returned by forwardDC("ConnectCA.SignIntermediate").
	secondaryIntermediate string
	// callbackCh receives one string per mocked operation,
	// e.g. "raftApply/ConnectCA" or "forwardDC/ConnectCA.Roots".
	callbackCh chan string
}
// NewMockCAServerDelegate returns a mockCAServerDelegate with a fresh
// in-memory state store, a short-TTL test CA root, and the default test CA
// configuration committed at index 1.
func NewMockCAServerDelegate(t *testing.T, config *Config) *mockCAServerDelegate {
	delegate := &mockCAServerDelegate{
		t:           t,
		config:      config,
		store:       state.NewStateStore(nil),
		primaryRoot: connect.TestCAWithTTL(t, nil, 1*time.Second),
		// Unbuffered so tests synchronize with each mocked operation.
		// (was: make(chan string, 0) — same semantics, idiomatic form)
		callbackCh: make(chan string),
	}
	delegate.store.CASetConfig(1, testCAConfig())
	return delegate
}
// State returns the delegate's in-memory state store.
func (m *mockCAServerDelegate) State() *state.Store {
	return m.store
}
// ProviderState looks up the stored Consul CA provider state for id.
func (m *mockCAServerDelegate) ProviderState(id string) (*structs.CAConsulProviderState, error) {
	// The returned index is not needed by callers here.
	_, providerState, err := m.store.CAProviderState(id)
	return providerState, err
}
// IsLeader always reports true; the mock acts as the leader.
func (m *mockCAServerDelegate) IsLeader() bool {
	return true
}
// ServersSupportMultiDCConnectCA always reports support (nil error) in the mock.
func (m *mockCAServerDelegate) ServersSupportMultiDCConnectCA() error {
	return nil
}
// ApplyCALeafRequest returns a fixed raft index without applying anything.
func (m *mockCAServerDelegate) ApplyCALeafRequest() (uint64, error) {
	return 3, nil
}
// ApplyCARequest mirrors FSM.applyConnectCAOperation because that functionality
// is not exported. It applies req to the in-memory store at the next index and
// reports the operation on callbackCh.
func (m *mockCAServerDelegate) ApplyCARequest(req *structs.CARequest) (interface{}, error) {
	idx, _, err := m.store.CAConfig(nil)
	if err != nil {
		return nil, err
	}

	// Signal before applying so tests can observe the op ordering.
	// (was: fmt.Sprintf with no format args — plain literal is equivalent)
	m.callbackCh <- "raftApply/ConnectCA"

	result := fsm.ApplyConnectCAOperationFromRequest(m.store, req, idx+1)
	// The FSM reports failures by returning the error as the result value.
	if err, ok := result.(error); ok && err != nil {
		return nil, err
	}
	return result, nil
}
// forwardDC fakes a cross-datacenter RPC. Supported methods fill reply with
// canned data held by the delegate; anything else returns an error. Each
// handled call is reported on callbackCh as "forwardDC/<method>".
func (m *mockCAServerDelegate) forwardDC(method, dc string, args interface{}, reply interface{}) error {
	switch method {
	case "ConnectCA.Roots":
		out := reply.(*structs.IndexedCARoots)
		out.TrustDomain = connect.TestClusterID
		out.Roots = []*structs.CARoot{m.primaryRoot}
		out.ActiveRootID = m.primaryRoot.ID
	case "ConnectCA.SignIntermediate":
		out := reply.(*string)
		*out = m.secondaryIntermediate
	default:
		return fmt.Errorf("received call to unsupported method %q", method)
	}
	m.callbackCh <- "forwardDC/" + method
	return nil
}
// generateCASignRequest builds a CASignRequest for csr, targeting the
// configured primary datacenter.
func (m *mockCAServerDelegate) generateCASignRequest(csr string) *structs.CASignRequest {
	return &structs.CASignRequest{
		Datacenter: m.config.PrimaryDatacenter,
		CSR:        csr,
	}
}
// mockCAProvider mocks an empty provider implementation with a channel in order to coordinate
// waiting for certain methods to be called.
type mockCAProvider struct {
	// callbackCh receives "provider/<Method>" when a tracked method runs.
	callbackCh chan string
	// rootPEM is returned by GenerateRoot, and by ActiveIntermediate when
	// intermediatePem is empty.
	rootPEM string
	// intermediatePem, when non-empty, is returned by ActiveIntermediate.
	intermediatePem string
}
// Configure is a no-op for the mock.
func (m *mockCAProvider) Configure(cfg ca.ProviderConfig) error { return nil }

// State returns no provider state.
func (m *mockCAProvider) State() (map[string]string, error) { return nil, nil }

// GenerateRoot returns the canned root PEM.
func (m *mockCAProvider) GenerateRoot() (ca.RootResult, error) {
	return ca.RootResult{PEM: m.rootPEM}, nil
}

// GenerateIntermediateCSR reports the call on callbackCh and returns an empty CSR.
func (m *mockCAProvider) GenerateIntermediateCSR() (string, error) {
	m.callbackCh <- "provider/GenerateIntermediateCSR"
	return "", nil
}

// SetIntermediate reports the call on callbackCh and discards its arguments.
func (m *mockCAProvider) SetIntermediate(intermediatePEM, rootPEM string) error {
	m.callbackCh <- "provider/SetIntermediate"
	return nil
}

// ActiveIntermediate returns intermediatePem when set, otherwise the root PEM.
func (m *mockCAProvider) ActiveIntermediate() (string, error) {
	if m.intermediatePem == "" {
		return m.rootPEM, nil
	}
	return m.intermediatePem, nil
}

// The remaining ca.Provider methods are inert stubs returning zero values.
func (m *mockCAProvider) GenerateIntermediate() (string, error)                     { return "", nil }
func (m *mockCAProvider) Sign(*x509.CertificateRequest) (string, error)             { return "", nil }
func (m *mockCAProvider) SignIntermediate(*x509.CertificateRequest) (string, error) { return "", nil }
func (m *mockCAProvider) CrossSignCA(*x509.Certificate) (string, error)             { return "", nil }
func (m *mockCAProvider) SupportsCrossSigning() (bool, error)                       { return false, nil }
func (m *mockCAProvider) Cleanup(_ bool, _ map[string]interface{}) error            { return nil }
// waitForCh blocks until the expected op string arrives on ch, failing the
// test if a different op arrives first or nothing arrives within CATestTimeout.
func waitForCh(t *testing.T, ch chan string, expected string) {
	t.Helper()
	select {
	case op := <-ch:
		if op != expected {
			t.Fatalf("got unexpected op %q, wanted %q", op, expected)
		}
	case <-time.After(CATestTimeout):
		t.Fatalf("never got op %q", expected)
	}
}
// waitForEmptyCh asserts that no further ops arrive on ch within a one-second
// grace period.
func waitForEmptyCh(t *testing.T, ch chan string) {
	t.Helper() // added for consistency with waitForCh: report the caller's line on failure
	select {
	case op := <-ch:
		t.Fatalf("got unexpected op %q", op)
	case <-time.After(1 * time.Second):
		// Nothing arrived: success.
	}
}
// testCAConfig returns the CA configuration used with the mock delegate:
// the test cluster ID, the "mock" provider, and long leaf/intermediate TTLs.
func testCAConfig() *structs.CAConfiguration {
	cfg := structs.CAConfiguration{
		ClusterID: connect.TestClusterID,
		Provider:  "mock",
		Config: map[string]interface{}{
			"LeafCertTTL":         "72h",
			"IntermediateCertTTL": "2160h",
		},
	}
	return &cfg
}
// initTestManager initializes a CAManager with a mockCAServerDelegate, consuming
// the ops that come through the channels and returning when initialization has finished.
func initTestManager(t *testing.T, manager *CAManager, delegate *mockCAServerDelegate) {
	t.Helper()
	initCh := make(chan struct{})
	go func() {
		require.NoError(t, manager.Initialize())
		close(initCh)
	}()
	// Drain the five ops emitted during initialization.
	// NOTE(review): the count of 5 matches the sequence asserted in
	// TestCAManager_Initialize (roots fetch, CSR, sign, set-intermediate,
	// raft apply) — confirm if the initialization flow changes.
	for i := 0; i < 5; i++ {
		select {
		case <-delegate.callbackCh:
		case <-time.After(CATestTimeout):
			t.Fatal("failed waiting for initialization events")
		}
	}
	// Wait for Initialize itself to return.
	select {
	case <-initCh:
	case <-time.After(CATestTimeout):
		t.Fatal("failed waiting for initialization")
	}
}
// TestCAManager_Initialize drives CAManager.Initialize against the mock
// delegate/provider (configured as a secondary DC, dc2 with primary dc1) and
// asserts the exact ordering of RPC forwards, provider calls, and the raft
// apply, plus the state-machine transitions around them.
func TestCAManager_Initialize(t *testing.T) {
	conf := DefaultConfig()
	conf.ConnectEnabled = true
	conf.PrimaryDatacenter = "dc1"
	conf.Datacenter = "dc2"
	delegate := NewMockCAServerDelegate(t, conf)
	// Use the root itself as the "signed intermediate" handed back by the
	// mocked primary.
	delegate.secondaryIntermediate = delegate.primaryRoot.RootCert
	manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)
	manager.providerShim = &mockCAProvider{
		callbackCh: delegate.callbackCh,
		rootPEM:    delegate.primaryRoot.RootCert,
	}
	// Call Initialize and then confirm the RPCs and provider calls
	// happen in the expected order.
	require.Equal(t, caStateUninitialized, manager.state)
	errCh := make(chan error)
	go func() {
		err := manager.Initialize()
		assert.NoError(t, err)
		errCh <- err
	}()
	waitForCh(t, delegate.callbackCh, "forwardDC/ConnectCA.Roots")
	require.EqualValues(t, caStateInitializing, manager.state)
	waitForCh(t, delegate.callbackCh, "provider/GenerateIntermediateCSR")
	waitForCh(t, delegate.callbackCh, "forwardDC/ConnectCA.SignIntermediate")
	waitForCh(t, delegate.callbackCh, "provider/SetIntermediate")
	waitForCh(t, delegate.callbackCh, "raftApply/ConnectCA")
	// No further ops should occur after the raft apply.
	waitForEmptyCh(t, delegate.callbackCh)
	// Make sure the Initialize call returned successfully.
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-time.After(CATestTimeout):
		t.Fatal("never got result from errCh")
	}
	require.Equal(t, caStateInitialized, manager.state)
}
// TestCAManager_UpdateConfigWhileRenewIntermediate verifies that
// UpdateConfiguration is rejected while RenewIntermediate is in flight (the
// state machine is occupied), and that the renewal itself still completes
// successfully.
func TestCAManager_UpdateConfigWhileRenewIntermediate(t *testing.T) {
	// No parallel execution because we change globals
	patchIntermediateCertRenewInterval(t)
	conf := DefaultConfig()
	conf.ConnectEnabled = true
	conf.PrimaryDatacenter = "dc1"
	conf.Datacenter = "dc2"
	delegate := NewMockCAServerDelegate(t, conf)
	delegate.secondaryIntermediate = delegate.primaryRoot.RootCert
	manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)
	manager.providerShim = &mockCAProvider{
		callbackCh: delegate.callbackCh,
		rootPEM:    delegate.primaryRoot.RootCert,
	}
	initTestManager(t, manager, delegate)

	// Simulate Wait half the TTL for the cert to need renewing.
	manager.timeNow = func() time.Time {
		return time.Now().Add(500 * time.Millisecond)
	}

	// Call RenewIntermediate and then confirm the RPCs and provider calls
	// happen in the expected order.
	errCh := make(chan error)
	go func() {
		errCh <- manager.RenewIntermediate(context.TODO(), false)
	}()

	waitForCh(t, delegate.callbackCh, "provider/GenerateIntermediateCSR")

	// Call UpdateConfiguration while RenewIntermediate is still in-flight to
	// make sure we get an error about the state being occupied.
	go func() {
		require.EqualValues(t, caStateRenewIntermediate, manager.state)
		// BUG FIX: the original asserted require.Error on a freshly
		// constructed errors.New value, which always passed regardless of
		// UpdateConfiguration's result. Assert on the actual return value.
		err := manager.UpdateConfiguration(&structs.CARequest{})
		require.Error(t, err, "expected an 'already in state' error while renewal is in-flight")
	}()

	waitForCh(t, delegate.callbackCh, "forwardDC/ConnectCA.SignIntermediate")
	waitForCh(t, delegate.callbackCh, "provider/SetIntermediate")
	waitForCh(t, delegate.callbackCh, "raftApply/ConnectCA")
	waitForEmptyCh(t, delegate.callbackCh)

	// Make sure the RenewIntermediate call returned successfully.
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-time.After(CATestTimeout):
		t.Fatal("never got result from errCh")
	}
	require.EqualValues(t, caStateInitialized, manager.state)
}
// TestCAManager_SignCertificate_WithExpiredCert table-tests SignCertificate
// against roots/intermediates with various validity windows: expired certs
// must produce an error, while not-yet-valid certs are tolerated.
func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// Each row defines validity windows for the root and intermediate plus
	// the expected outcome.
	args := []struct {
		testName              string
		notBeforeRoot         time.Time
		notAfterRoot          time.Time
		notBeforeIntermediate time.Time
		notAfterIntermediate  time.Time
		isError               bool
		errorMsg              string
	}{
		{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
		{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
		{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
		// a cert that is not yet valid is ok, assume it will be valid soon enough
		{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},
		{"root in the future", time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
	}

	// One key shared by all subtests; 4096-bit RSA generation is slow.
	caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
	require.NoError(t, err, "failed to generate key")

	for _, arg := range args {
		t.Run(arg.testName, func(t *testing.T) {
			// No parallel execution because we change globals
			// Set the interval and drift buffer low for renewing the cert.
			origInterval := structs.IntermediateCertRenewInterval
			origDriftBuffer := ca.CertificateTimeDriftBuffer
			defer func() {
				structs.IntermediateCertRenewInterval = origInterval
				ca.CertificateTimeDriftBuffer = origDriftBuffer
			}()
			structs.IntermediateCertRenewInterval = time.Millisecond
			ca.CertificateTimeDriftBuffer = 0

			conf := DefaultConfig()
			conf.ConnectEnabled = true
			conf.PrimaryDatacenter = "dc1"
			conf.Datacenter = "dc2"

			rootPEM := generateCertPEM(t, caPrivKey, arg.notBeforeRoot, arg.notAfterRoot)
			intermediatePEM := generateCertPEM(t, caPrivKey, arg.notBeforeIntermediate, arg.notAfterIntermediate)

			delegate := NewMockCAServerDelegate(t, conf)
			delegate.primaryRoot.RootCert = rootPEM
			delegate.secondaryIntermediate = intermediatePEM
			manager := NewCAManager(delegate, nil, testutil.Logger(t), conf)

			manager.providerShim = &mockCAProvider{
				callbackCh:      delegate.callbackCh,
				rootPEM:         rootPEM,
				intermediatePem: intermediatePEM,
			}
			initTestManager(t, manager, delegate)

			// Simulate Wait half the TTL for the cert to need renewing.
			manager.timeNow = func() time.Time {
				return time.Now().UTC().Add(500 * time.Millisecond)
			}

			// Call RenewIntermediate and then confirm the RPCs and provider calls
			// happen in the expected order.
			_, err := manager.SignCertificate(&x509.CertificateRequest{}, &connect.SpiffeIDAgent{})
			if arg.isError {
				require.Error(t, err)
				require.Contains(t, err.Error(), arg.errorMsg)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// generateCertPEM builds a self-signed CA certificate valid over
// [notBefore, notAfter], signed with caPrivKey, and returns it PEM-encoded.
func generateCertPEM(t *testing.T, caPrivKey *rsa.PrivateKey, notBefore time.Time, notAfter time.Time) string {
	t.Helper()
	// Named "template" rather than "ca" to avoid shadowing the imported ca package.
	template := &x509.Certificate{
		SerialNumber: big.NewInt(2019),
		Subject: pkix.Name{
			Organization:  []string{"Company, INC."},
			Country:       []string{"US"},
			Province:      []string{""},
			Locality:      []string{"San Francisco"},
			StreetAddress: []string{"Golden Gate Bridge"},
			PostalCode:    []string{"94016"},
		},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		IsCA:                  true,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		// SPIFFE URI SAN so the cert is usable in Connect verification paths.
		URIs: []*url.URL{connect.SpiffeIDAgent{Host: "foo"}.URI()},
	}
	// Self-signed: template is both the certificate and its parent.
	caBytes, err := x509.CreateCertificate(rand.Reader, template, template, &caPrivKey.PublicKey, caPrivKey)
	require.NoError(t, err, "failed to create cert")

	caPEM := new(bytes.Buffer)
	err = pem.Encode(caPEM, &pem.Block{
		Type:  "CERTIFICATE",
		Bytes: caBytes,
	})
	require.NoError(t, err, "failed to encode")
	return caPEM.String()
}
// TestCADelegateWithState_GenerateCASignRequest checks that generated sign
// requests are routed to the server's configured primary datacenter.
func TestCADelegateWithState_GenerateCASignRequest(t *testing.T) {
	s := Server{config: &Config{PrimaryDatacenter: "east"}, tokens: new(token.Store)}
	d := &caDelegateWithState{Server: &s}
	req := d.generateCASignRequest("A")
	require.Equal(t, "east", req.RequestDatacenter())
}
// TestCAManager_Initialize_Logging starts a real server with a captured log
// output and asserts that CA initialization logs the provider-configured
// message.
func TestCAManager_Initialize_Logging(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	_, conf1 := testServerConfig(t)

	// Setup dummy logger to catch output
	var buf bytes.Buffer
	logger := testutil.LoggerWithOutput(t, &buf)

	deps := newDefaultDeps(t, conf1)
	deps.Logger = logger

	s1, err := NewServer(conf1, deps)
	require.NoError(t, err)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Wait til CA root is setup
	retry.Run(t, func(r *retry.R) {
		var out structs.IndexedCARoots
		r.Check(s1.RPC("ConnectCA.Roots", structs.DCSpecificRequest{
			Datacenter: conf1.Datacenter,
		}, &out))
	})

	require.Contains(t, buf.String(), "consul CA provider configured")
}
// TestCAManager_UpdateConfiguration_Vault_Primary reconfigures a Vault-backed
// primary onto new PKI paths and verifies a new root is produced whose
// intermediates include both a cross-sign cert and the local leaf signing
// cert, with a consistent SigningKeyID.
func TestCAManager_UpdateConfiguration_Vault_Primary(t *testing.T) {
	// Added for consistency with the other Vault-backed tests in this file:
	// spinning up Vault plus a server is too slow for -short runs.
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)
	vault := ca.NewTestVaultServer(t)
	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         "pki-root/",
				"IntermediatePKIPath": "pki-intermediate/",
			},
		}
	})
	defer func() {
		s1.Shutdown()
		s1.leaderRoutineManager.Wait()
	}()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Capture the original root and check its signing key wiring.
	_, origRoot, err := s1.fsm.State().CARootActive(nil)
	require.NoError(t, err)
	require.Len(t, origRoot.IntermediateCerts, 1)
	cert, err := connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(origRoot))
	require.NoError(t, err)
	require.Equal(t, connect.HexString(cert.SubjectKeyId), origRoot.SigningKeyID)

	// Point the provider at brand-new PKI paths, forcing a root change.
	err = s1.caManager.UpdateConfiguration(&structs.CARequest{
		Config: &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         "pki-root-2/",
				"IntermediatePKIPath": "pki-intermediate-2/",
			},
		},
	})
	require.NoError(t, err)

	_, newRoot, err := s1.fsm.State().CARootActive(nil)
	require.NoError(t, err)
	require.Len(t, newRoot.IntermediateCerts, 2,
		"expected one cross-sign cert and one local leaf sign cert")
	require.NotEqual(t, origRoot.ID, newRoot.ID)
	cert, err = connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(newRoot))
	require.NoError(t, err)
	require.Equal(t, connect.HexString(cert.SubjectKeyId), newRoot.SigningKeyID)
}
// TestCAManager_Initialize_Vault_WithIntermediateAsPrimaryCA configures the
// primary DC's "root" PKI mount as an intermediate signed by an external CA
// (see setupPrimaryCA) and verifies both primary and secondary DCs serve
// roots and sign verifiable leaf certificates.
func TestCAManager_Initialize_Vault_WithIntermediateAsPrimaryCA(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)
	vault := ca.NewTestVaultServer(t)
	vclient := vault.Client()
	generateExternalRootCA(t, vclient)

	// The mesh "root" is itself an intermediate of the external corp CA.
	meshRootPath := "pki-root"
	primaryCert := setupPrimaryCA(t, vclient, meshRootPath, "")

	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         meshRootPath,
				"IntermediatePKIPath": "pki-intermediate/",
			},
		}
	})

	runStep(t, "check primary DC", func(t *testing.T) {
		testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

		codec := rpcClient(t, s1)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		// The served root must be exactly the cert installed in Vault.
		require.Equal(t, primaryCert, roots.Roots[0].RootCert)

		leafCertPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Roots[0], leafCertPEM)
	})

	// TODO: renew primary leaf signing cert
	// TODO: rotate root

	runStep(t, "run secondary DC", func(t *testing.T) {
		_, sDC2 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc2"
			c.PrimaryDatacenter = "dc1"
			c.CAConfig = &structs.CAConfiguration{
				Provider: "vault",
				Config: map[string]interface{}{
					"Address":             vault.Addr,
					"Token":               vault.RootToken,
					"RootPKIPath":         meshRootPath,
					"IntermediatePKIPath": "pki-secondary/",
				},
			}
		})
		defer sDC2.Shutdown()
		joinWAN(t, sDC2, s1)
		testrpc.WaitForActiveCARoot(t, sDC2.RPC, "dc2", nil)

		codec := rpcClient(t, sDC2)
		roots := structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)

		leafCertPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
		verifyLeafCert(t, roots.Roots[0], leafCertPEM)

		// TODO: renew secondary leaf signing cert
	})
}
// getLeafCert generates a key and CSR for a dummy service ("srv1") in the
// given trust domain and datacenter, signs it via the ConnectCA.Sign RPC, and
// returns the resulting leaf certificate PEM.
func getLeafCert(t *testing.T, codec rpc.ClientCodec, trustDomain string, dc string) string {
	t.Helper() // added: sibling test helpers in this file all mark themselves

	pk, _, err := connect.GeneratePrivateKey()
	require.NoError(t, err)
	spiffeID := &connect.SpiffeIDService{
		Host:       trustDomain,
		Service:    "srv1",
		Datacenter: dc,
	}
	csr, err := connect.CreateCSR(spiffeID, pk, nil, nil)
	require.NoError(t, err)

	req := structs.CASignRequest{CSR: csr}
	cert := structs.IssuedCert{}
	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
	require.NoError(t, err)

	return cert.CertPEM
}
// TestCAManager_Initialize_Vault_WithExternalTrustedCA exercises the full CA
// lifecycle against a Vault primary whose CA is an intermediate of an
// external trusted root: primary/secondary bring-up, leaf signing cert
// renewal in both DCs, and two root rotations (provider change, then a new
// external root), verifying at each step that previously issued leaves still
// verify.
func TestCAManager_Initialize_Vault_WithExternalTrustedCA(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	ca.SkipIfVaultNotPresent(t)

	vault := ca.NewTestVaultServer(t)
	vclient := vault.Client()
	rootPEM := generateExternalRootCA(t, vclient)

	primaryCAPath := "pki-primary"
	primaryCert := setupPrimaryCA(t, vclient, primaryCAPath, rootPEM)

	_, serverDC1 := testServerWithConfig(t, func(c *Config) {
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             vault.Addr,
				"Token":               vault.RootToken,
				"RootPKIPath":         primaryCAPath,
				"IntermediatePKIPath": "pki-intermediate/",
			},
		}
	})
	testrpc.WaitForTestAgent(t, serverDC1.RPC, "dc1")

	var origLeaf string
	roots := structs.IndexedCARoots{}
	runStep(t, "verify primary DC", func(t *testing.T) {
		codec := rpcClient(t, serverDC1)
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		require.Equal(t, primaryCert, roots.Roots[0].RootCert)
		// The served bundle must include the external trusted root.
		require.Contains(t, roots.Roots[0].RootCert, rootPEM)

		leafCert := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Active(), leafCert)
		origLeaf = leafCert
	})

	_, serverDC2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc1"
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address": vault.Addr,
				"Token":   vault.RootToken,
				// Secondaries don't use the root path; it must be ignored.
				"RootPKIPath":         "should-be-ignored",
				"IntermediatePKIPath": "pki-secondary/",
			},
		}
	})

	var origLeafSecondary string
	runStep(t, "start secondary DC", func(t *testing.T) {
		joinWAN(t, serverDC2, serverDC1)
		testrpc.WaitForActiveCARoot(t, serverDC2.RPC, "dc2", nil)

		codec := rpcClient(t, serverDC2)
		roots = structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
		origLeafSecondary = leafPEM
	})

	runStep(t, "renew leaf signing CA in primary", func(t *testing.T) {
		previous := serverDC1.caManager.getLeafSigningCertFromRoot(roots.Active())
		renewLeafSigningCert(t, serverDC1.caManager, serverDC1.caManager.primaryRenewIntermediate)

		codec := rpcClient(t, serverDC1)
		roots = structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		require.Len(t, roots.Roots[0].IntermediateCerts, 2)

		newCert := serverDC1.caManager.getLeafSigningCertFromRoot(roots.Active())
		require.NotEqual(t, previous, newCert)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
		// original certs from old signing cert should still verify
		verifyLeafCert(t, roots.Roots[0], origLeaf)
	})

	runStep(t, "renew leaf signing CA in secondary", func(t *testing.T) {
		previous := serverDC2.caManager.getLeafSigningCertFromRoot(roots.Active())
		renewLeafSigningCert(t, serverDC2.caManager, serverDC2.caManager.secondaryRequestNewSigningCert)

		codec := rpcClient(t, serverDC2)
		roots = structs.IndexedCARoots{}
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 1)
		// one intermediate from primary, two from secondary
		require.Len(t, roots.Roots[0].IntermediateCerts, 3)

		// BUG FIX: the renewed signing cert belongs to the secondary, and
		// "previous" came from serverDC2 — the original compared against
		// serverDC1's cert, which is trivially different.
		newCert := serverDC2.caManager.getLeafSigningCertFromRoot(roots.Active())
		require.NotEqual(t, previous, newCert)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc2")
		verifyLeafCert(t, roots.Roots[0], leafPEM)
		// original certs from old signing cert should still verify
		verifyLeafCert(t, roots.Roots[0], origLeaf)
	})

	runStep(t, "rotate root by changing the provider", func(t *testing.T) {
		codec := rpcClient(t, serverDC1)
		req := &structs.CARequest{
			Op: structs.CAOpSetConfig,
			Config: &structs.CAConfiguration{
				Provider: "consul",
			},
		}
		var resp error
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", req, &resp)
		require.NoError(t, err)
		require.Nil(t, resp)

		roots = structs.IndexedCARoots{}
		err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		// Old root stays in the list alongside the new one.
		require.Len(t, roots.Roots, 2)
		active := roots.Active()
		require.Len(t, active.IntermediateCerts, 1)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Active(), leafPEM)
		// original certs from old root cert should still verify
		verifyLeafCertWithRoots(t, roots, origLeaf)

		// original certs from secondary should still verify
		rootsSecondary := structs.IndexedCARoots{}
		r := &structs.DCSpecificRequest{Datacenter: "dc2"}
		err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", r, &rootsSecondary)
		require.NoError(t, err)
		verifyLeafCertWithRoots(t, rootsSecondary, origLeafSecondary)
	})

	runStep(t, "rotate to a different external root", func(t *testing.T) {
		setupPrimaryCA(t, vclient, "pki-primary-2/", rootPEM)

		codec := rpcClient(t, serverDC1)
		req := &structs.CARequest{
			Op: structs.CAOpSetConfig,
			Config: &structs.CAConfiguration{
				Provider: "vault",
				Config: map[string]interface{}{
					"Address":             vault.Addr,
					"Token":               vault.RootToken,
					"RootPKIPath":         "pki-primary-2/",
					"IntermediatePKIPath": "pki-intermediate-2/",
				},
			},
		}
		var resp error
		err := msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", req, &resp)
		require.NoError(t, err)
		require.Nil(t, resp)

		roots = structs.IndexedCARoots{}
		err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", &structs.DCSpecificRequest{}, &roots)
		require.NoError(t, err)
		require.Len(t, roots.Roots, 3)
		active := roots.Active()
		require.Len(t, active.IntermediateCerts, 2)

		leafPEM := getLeafCert(t, codec, roots.TrustDomain, "dc1")
		verifyLeafCert(t, roots.Active(), leafPEM)
		// original certs from old root cert should still verify
		verifyLeafCertWithRoots(t, roots, origLeaf)

		// original certs from secondary should still verify
		rootsSecondary := structs.IndexedCARoots{}
		r := &structs.DCSpecificRequest{Datacenter: "dc2"}
		err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", r, &rootsSecondary)
		require.NoError(t, err)
		verifyLeafCertWithRoots(t, rootsSecondary, origLeafSecondary)
	})
}
// renewLeafSigningCert mimics RenewIntermediate. This is unfortunate, but
// necessary for now as there is no easy way to invoke that logic unconditionally.
// Currently, it requires patching values and polling for the operation to
// complete, which adds a lot of distractions to a test case.
// With this function we can instead unconditionally rotate the leaf signing cert
// synchronously.
func renewLeafSigningCert(t *testing.T, manager *CAManager, fn func(ca.Provider, *structs.CARoot) error) {
	t.Helper()

	provider, _ := manager.getCAProvider()

	// Work on a clone of the active root so the store's copy is not mutated
	// before persistNewRootAndConfig succeeds.
	_, active, err := manager.delegate.State().CARootActive(nil)
	require.NoError(t, err)
	rootCopy := active.Clone()

	require.NoError(t, fn(provider, rootCopy))
	require.NoError(t, manager.persistNewRootAndConfig(provider, rootCopy, nil))
	manager.setCAProvider(provider, rootCopy)
}
// generateExternalRootCA mounts a PKI secrets engine at "corp" and generates
// a root certificate there, standing in for an externally managed corporate
// CA. It returns the root certificate PEM with a trailing newline ensured.
func generateExternalRootCA(t *testing.T, client *vaultapi.Client) string {
	t.Helper()

	mount := &vaultapi.MountInput{
		Type:        "pki",
		Description: "External root, probably corporate CA",
		Config: vaultapi.MountConfigInput{
			MaxLeaseTTL:     "2400h",
			DefaultLeaseTTL: "1h",
		},
	}
	require.NoError(t, client.Sys().Mount("corp", mount), "failed to mount")

	resp, err := client.Logical().Write("corp/root/generate/internal", map[string]interface{}{
		"common_name": "corporate CA",
		"ttl":         "2400h",
	})
	require.NoError(t, err, "failed to generate root")
	return ca.EnsureTrailingNewline(resp.Data["certificate"].(string))
}
// setupPrimaryCA mounts a PKI secrets engine at path, generates an
// intermediate CSR there, has the external "corp" root sign it, and installs
// the signed chain. It returns the PEM bundle (intermediate then root) that
// was set as signed, with a trailing newline ensured.
func setupPrimaryCA(t *testing.T, client *vaultapi.Client, path string, rootPEM string) string {
	t.Helper()

	err := client.Sys().Mount(path, &vaultapi.MountInput{
		Type:        "pki",
		Description: "primary CA for Consul CA",
		Config: vaultapi.MountConfigInput{
			MaxLeaseTTL:     "2200h",
			DefaultLeaseTTL: "1h",
		},
	})
	require.NoError(t, err, "failed to mount")

	csrResp, err := client.Logical().Write(path+"/intermediate/generate/internal", map[string]interface{}{
		"common_name": "primary CA",
		"ttl":         "2200h",
		"key_type":    "ec",
		"key_bits":    256,
	})
	require.NoError(t, err, "failed to generate root")

	signed, err := client.Logical().Write("corp/root/sign-intermediate", map[string]interface{}{
		"csr":            csrResp.Data["csr"],
		"use_csr_values": true,
		"format":         "pem_bundle",
		"ttl":            "2200h",
	})
	require.NoError(t, err, "failed to sign intermediate")

	// Chain order: signed intermediate first, then the external root.
	bundle := ca.EnsureTrailingNewline(signed.Data["certificate"].(string)) +
		ca.EnsureTrailingNewline(rootPEM)

	_, err = client.Logical().Write(path+"/intermediate/set-signed", map[string]interface{}{
		"certificate": bundle,
	})
	require.NoError(t, err, "failed to set signed intermediate")
	return ca.EnsureTrailingNewline(bundle)
}
|
package auth
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/registry/client"
"github.com/docker/distribution/registry/client/transport"
)
// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
// basic auth due to lack of credentials.
var ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")

// AuthenticationHandler is an interface for authorizing a request from
// params from a "WWW-Authenticate" header for a single scheme.
type AuthenticationHandler interface {
	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
	Scheme() string

	// AuthorizeRequest adds the authorization header to a request (if needed)
	// using the parameters from "WWW-Authenticate" method. The parameter
	// values depend on the scheme.
	AuthorizeRequest(req *http.Request, params map[string]string) error
}

// CredentialStore is an interface for getting credentials for
// a given URL.
type CredentialStore interface {
	// Basic returns basic auth (username, password) for the given URL.
	Basic(*url.URL) (string, string)

	// RefreshToken returns a refresh token for the
	// given URL and service.
	RefreshToken(*url.URL, string) string

	// SetRefreshToken sets the refresh token if none
	// is provided for the given url and service.
	SetRefreshToken(realm *url.URL, service, token string)
}
// NewAuthorizer creates an authorizer which can handle multiple authentication
// schemes. The handlers are tried in order, the higher priority authentication
// methods should be first. The challenge manager holds the cached challenges
// for a given root API endpoint (for example "https://registry-1.docker.io/v2/").
func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier {
	return &endpointAuthorizer{
		challenges: manager,
		handlers:   handlers,
	}
}
// endpointAuthorizer implements transport.RequestModifier by looking up the
// stored challenges for a request's /v2/ API root and delegating to the
// handlers whose scheme matches.
type endpointAuthorizer struct {
	challenges ChallengeManager
	handlers   []AuthenticationHandler
	// NOTE(review): transport appears unused in this file — confirm whether
	// it can be removed.
	transport http.RoundTripper
}
// ModifyRequest implements transport.RequestModifier. For requests under a
// "/v2/" API root it fetches the stored challenges for that root and lets
// every handler with a matching scheme add authorization to the request.
// Non-registry requests pass through untouched.
func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
	v2Root := strings.Index(req.URL.Path, "/v2/")
	if v2Root == -1 {
		// Not a registry API request; nothing to authorize.
		return nil
	}

	// Challenges are keyed by the API root URL (everything up to and
	// including "/v2/").
	ping := url.URL{
		Host:   req.URL.Host,
		Scheme: req.URL.Scheme,
		Path:   req.URL.Path[:v2Root+4],
	}

	challenges, err := ea.challenges.GetChallenges(ping.String())
	if err != nil {
		return err
	}
	if len(challenges) == 0 {
		return nil
	}

	// Handlers are ordered by priority; each matching handler/challenge
	// pair gets a chance to modify the request.
	for _, handler := range ea.handlers {
		for _, c := range challenges {
			if c.Scheme != handler.Scheme() {
				continue
			}
			if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
				return err
			}
		}
	}
	return nil
}
// This is the minimum duration a token can last (in seconds).
// A token must not live less than 60 seconds because older versions
// of the Docker client didn't read their expiration from the token
// response and assumed 60 seconds. So to remain compatible with
// those implementations, a token must live at least this long.
const minimumTokenLifetimeSeconds = 60

// clock is a private interface for time used by this package to enable tests
// to provide their own implementation.
type clock interface {
	Now() time.Time
}
// tokenHandler implements AuthenticationHandler for the "bearer" scheme by
// fetching tokens from a remote token server and caching the result.
type tokenHandler struct {
	header    http.Header
	creds     CredentialStore
	transport http.RoundTripper
	clock     clock

	offlineAccess bool
	forceOAuth    bool
	clientID      string
	scopes        []Scope

	// tokenLock guards tokenCache and tokenExpiration.
	tokenLock       sync.Mutex
	tokenCache      string
	tokenExpiration time.Time
}
// Scope is a type which is serializable to a string
// using the allow scope grammar.
type Scope interface {
	String() string
}

// RepositoryScope represents a token scope for access
// to a repository.
type RepositoryScope struct {
	Repository string
	Actions    []string
}

// String returns the string representation of the repository
// using the scope grammar: "repository:<name>:<action>[,<action>...]".
func (rs RepositoryScope) String() string {
	parts := []string{"repository", rs.Repository, strings.Join(rs.Actions, ",")}
	return strings.Join(parts, ":")
}
// TokenHandlerOptions is used to configure a new token handler.
type TokenHandlerOptions struct {
	Transport   http.RoundTripper
	Credentials CredentialStore

	// OfflineAccess requests a refresh token from the token server.
	OfflineAccess bool
	// ForceOAuth forces the OAuth POST path even without a refresh token.
	ForceOAuth bool
	ClientID   string
	Scopes     []Scope
}

// realClock is an implementation of clock for providing real time data.
type realClock struct{}

// Now implements clock.
func (realClock) Now() time.Time { return time.Now() }
// NewTokenHandler creates a new AuthenticationHandler which supports
// fetching tokens from a remote token server, scoped to a single
// repository and set of actions.
func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
	opts := TokenHandlerOptions{
		Transport:   transport,
		Credentials: creds,
		Scopes: []Scope{RepositoryScope{
			Repository: scope,
			Actions:    actions,
		}},
	}
	return NewTokenHandlerWithOptions(opts)
}
// NewTokenHandlerWithOptions creates a new token handler using the provided
// options structure. The handler always uses the real wall clock.
func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
	return &tokenHandler{
		transport:     options.Transport,
		creds:         options.Credentials,
		offlineAccess: options.OfflineAccess,
		forceOAuth:    options.ForceOAuth,
		clientID:      options.ClientID,
		scopes:        options.Scopes,
		clock:         realClock{},
	}
}
// client returns an HTTP client for talking to the token server. A fresh
// client is built per call around the handler's transport; the 15-second
// timeout bounds the whole token exchange.
func (th *tokenHandler) client() *http.Client {
	return &http.Client{
		Transport: th.transport,
		Timeout:   15 * time.Second,
	}
}

// Scheme returns the auth scheme this handler implements: "bearer".
func (th *tokenHandler) Scheme() string {
	return "bearer"
}
// AuthorizeRequest sets a Bearer token on req, fetching (or reusing a cached)
// token for the handler's configured scopes. A "from" query parameter —
// presumably a cross-repository source — adds a transient pull scope for
// that repository.
func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	var extraScopes []string
	if from := req.URL.Query().Get("from"); from != "" {
		pullScope := RepositoryScope{Repository: from, Actions: []string{"pull"}}
		extraScopes = append(extraScopes, pullScope.String())
	}

	token, err := th.getToken(params, extraScopes...)
	if err != nil {
		return err
	}

	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
	return nil
}
// getToken returns a bearer token covering the handler's configured scopes
// plus any additionalScopes. Tokens for the configured scopes alone are
// cached until expiration; requests with added scopes always hit the token
// server and never disturb the cache. Safe for concurrent use via tokenLock.
func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
	th.tokenLock.Lock()
	defer th.tokenLock.Unlock()
	scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
	for _, scope := range th.scopes {
		scopes = append(scopes, scope.String())
	}
	var addedScopes bool
	for _, scope := range additionalScopes {
		scopes = append(scopes, scope)
		addedScopes = true
	}
	now := th.clock.Now()
	// Refetch when the cached token has expired or when extra scopes were
	// requested (a cached token would be too narrow).
	if now.After(th.tokenExpiration) || addedScopes {
		token, expiration, err := th.fetchToken(params, scopes)
		if err != nil {
			return "", err
		}
		// do not update cache for added scope tokens
		if !addedScopes {
			th.tokenCache = token
			th.tokenExpiration = expiration
		}
		return token, nil
	}
	return th.tokenCache, nil
}
// postTokenResponse is the body of a successful OAuth2-style POST to the
// token endpoint.
type postTokenResponse struct {
	AccessToken  string    `json:"access_token"`
	RefreshToken string    `json:"refresh_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	Scope        string    `json:"scope"`
}
// fetchTokenWithOAuth fetches a token from the realm with an OAuth2 form
// POST. It authenticates with the given refresh token when present,
// otherwise with basic credentials from the credential store as a
// "password" grant. Returns the token and its computed expiration time.
func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
	form := url.Values{}
	form.Set("scope", strings.Join(scopes, " "))
	form.Set("service", service)

	clientID := th.clientID
	if clientID == "" {
		// Use default client, this is a required field
		clientID = "registry-client"
	}
	form.Set("client_id", clientID)

	if refreshToken != "" {
		form.Set("grant_type", "refresh_token")
		form.Set("refresh_token", refreshToken)
	} else if th.creds != nil {
		form.Set("grant_type", "password")
		username, password := th.creds.Basic(realm)
		form.Set("username", username)
		form.Set("password", password)

		// attempt to get a refresh token
		form.Set("access_type", "offline")
	} else {
		// refuse to do oauth without a grant type
		return "", time.Time{}, fmt.Errorf("no supported grant type")
	}

	resp, err := th.client().PostForm(realm.String(), form)
	if err != nil {
		return "", time.Time{}, err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		err := client.HandleErrorResponse(resp)
		return "", time.Time{}, err
	}

	decoder := json.NewDecoder(resp.Body)

	var tr postTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
	}

	// Fix: guard against a nil credential store before persisting a new
	// refresh token. A refresh token can only be received when one was sent
	// or a password grant was used, but the basic-auth path already checks
	// th.creds != nil before this call; stay consistent and nil-safe here.
	if th.creds != nil && tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
	}

	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
		// issued_at is optional in the token response.
		tr.IssuedAt = th.clock.Now().UTC()
	}

	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
// getTokenResponse is the body of a successful GET to the token endpoint.
// Either "token" or the equivalent "access_token" carries the bearer token.
type getTokenResponse struct {
	Token        string    `json:"token"`
	AccessToken  string    `json:"access_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	RefreshToken string    `json:"refresh_token"`
}
// fetchTokenWithBasicAuth fetches a token via GET on the realm, passing the
// service, scopes, and (optionally) basic credentials. Returns the token and
// its computed expiration time.
func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
	req, err := http.NewRequest("GET", realm.String(), nil)
	if err != nil {
		return "", time.Time{}, err
	}

	reqParams := req.URL.Query()

	if service != "" {
		reqParams.Add("service", service)
	}

	for _, scope := range scopes {
		reqParams.Add("scope", scope)
	}

	if th.offlineAccess {
		reqParams.Add("offline_token", "true")
		// Fix: identify the client when requesting an offline (refresh)
		// token, matching the OAuth POST path which always sends client_id.
		clientID := th.clientID
		if clientID == "" {
			clientID = "registry-client"
		}
		reqParams.Add("client_id", clientID)
	}

	if th.creds != nil {
		username, password := th.creds.Basic(realm)
		if username != "" && password != "" {
			reqParams.Add("account", username)
			req.SetBasicAuth(username, password)
		}
	}

	req.URL.RawQuery = reqParams.Encode()

	resp, err := th.client().Do(req)
	if err != nil {
		return "", time.Time{}, err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		err := client.HandleErrorResponse(resp)
		return "", time.Time{}, err
	}

	decoder := json.NewDecoder(resp.Body)

	var tr getTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
	}

	if tr.RefreshToken != "" && th.creds != nil {
		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
	}

	// `access_token` is equivalent to `token` and if both are specified
	// the choice is undefined. Canonicalize `access_token` by sticking
	// things in `token`.
	if tr.AccessToken != "" {
		tr.Token = tr.AccessToken
	}

	if tr.Token == "" {
		return "", time.Time{}, errors.New("authorization server did not include a token in the response")
	}

	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
		// issued_at is optional in the token response.
		tr.IssuedAt = th.clock.Now().UTC()
	}

	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
// fetchToken dispatches a token fetch based on the challenge parameters:
// OAuth POST when a refresh token is available or OAuth is forced,
// basic-auth GET otherwise. The realm parameter is required.
func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
	realm, ok := params["realm"]
	if !ok {
		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
	}

	// TODO(dmcgowan): Handle empty scheme and relative realm
	realmURL, err := url.Parse(realm)
	if err != nil {
		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
	}

	service := params["service"]

	var refreshToken string

	if th.creds != nil {
		refreshToken = th.creds.RefreshToken(realmURL, service)
	}

	if refreshToken != "" || th.forceOAuth {
		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
	}

	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
}
// basicHandler implements AuthenticationHandler for the "basic" scheme using
// credentials from a CredentialStore.
type basicHandler struct {
	creds CredentialStore
}

// NewBasicHandler creates a new authentication handler which adds
// basic authentication credentials to a request.
func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
	return &basicHandler{
		creds: creds,
	}
}

// Scheme returns the auth scheme this handler implements: "basic".
func (*basicHandler) Scheme() string {
	return "basic"
}
// AuthorizeRequest applies basic auth credentials for req.URL when the store
// yields a non-empty username and password; otherwise it reports
// ErrNoBasicAuthCredentials. The challenge params are unused.
func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	if bh.creds == nil {
		return ErrNoBasicAuthCredentials
	}
	username, password := bh.creds.Basic(req.URL)
	if username == "" || password == "" {
		return ErrNoBasicAuthCredentials
	}
	req.SetBasicAuth(username, password)
	return nil
}
Add client ID to token fetch to GET endpoint
Signed-off-by: Derek McGowan <e1c79a582b6629e6b39e9679f4bb964d25db4aa8@mcgstyle.net> (github: dmcgowan)
package auth
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/registry/client"
"github.com/docker/distribution/registry/client/transport"
)
// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
// basic auth due to lack of credentials.
var ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")

// defaultClientID identifies this client to the token server when no
// explicit ClientID is configured.
const defaultClientID = "registry-client"

// AuthenticationHandler is an interface for authorizing a request from
// params from a "WWW-Authenticate" header for a single scheme.
type AuthenticationHandler interface {
	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
	Scheme() string

	// AuthorizeRequest adds the authorization header to a request (if needed)
	// using the parameters from "WWW-Authenticate" method. The parameter
	// values depend on the scheme.
	AuthorizeRequest(req *http.Request, params map[string]string) error
}

// CredentialStore is an interface for getting credentials for
// a given URL.
type CredentialStore interface {
	// Basic returns basic auth (username, password) for the given URL.
	Basic(*url.URL) (string, string)

	// RefreshToken returns a refresh token for the
	// given URL and service.
	RefreshToken(*url.URL, string) string

	// SetRefreshToken sets the refresh token if none
	// is provided for the given url and service.
	SetRefreshToken(realm *url.URL, service, token string)
}
// NewAuthorizer creates an authorizer which can handle multiple authentication
// schemes. The handlers are tried in order, the higher priority authentication
// methods should be first. The challenge manager holds the cached challenges
// for a given root API endpoint (for example "https://registry-1.docker.io/v2/").
func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier {
	return &endpointAuthorizer{
		challenges: manager,
		handlers:   handlers,
	}
}
// endpointAuthorizer implements transport.RequestModifier by looking up the
// stored challenges for a request's /v2/ API root and delegating to the
// handlers whose scheme matches.
type endpointAuthorizer struct {
	challenges ChallengeManager
	handlers   []AuthenticationHandler
	// NOTE(review): transport appears unused in this file — confirm whether
	// it can be removed.
	transport http.RoundTripper
}
// ModifyRequest implements transport.RequestModifier. For requests under a
// "/v2/" API root it fetches the stored challenges for that root and lets
// every handler with a matching scheme add authorization to the request.
// Non-registry requests pass through untouched.
func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
	v2Root := strings.Index(req.URL.Path, "/v2/")
	if v2Root == -1 {
		return nil
	}

	// Challenges are keyed by the API root URL (everything up to and
	// including "/v2/").
	ping := url.URL{
		Host:   req.URL.Host,
		Scheme: req.URL.Scheme,
		Path:   req.URL.Path[:v2Root+4],
	}

	pingEndpoint := ping.String()

	challenges, err := ea.challenges.GetChallenges(pingEndpoint)
	if err != nil {
		return err
	}

	if len(challenges) > 0 {
		// Each matching handler/challenge pair gets a chance to modify
		// the request; handlers are ordered by priority.
		for _, handler := range ea.handlers {
			for _, challenge := range challenges {
				if challenge.Scheme != handler.Scheme() {
					continue
				}
				if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil {
					return err
				}
			}
		}
	}

	return nil
}
// This is the minimum duration a token can last (in seconds).
// A token must not live less than 60 seconds because older versions
// of the Docker client didn't read their expiration from the token
// response and assumed 60 seconds. So to remain compatible with
// those implementations, a token must live at least this long.
const minimumTokenLifetimeSeconds = 60

// clock is a private interface for time used by this package to enable tests
// to provide their own implementation.
type clock interface {
	Now() time.Time
}
// tokenHandler implements AuthenticationHandler for the "bearer" scheme by
// fetching tokens from a remote token server and caching the result.
type tokenHandler struct {
	header    http.Header
	creds     CredentialStore
	transport http.RoundTripper
	clock     clock

	offlineAccess bool
	forceOAuth    bool
	clientID      string
	scopes        []Scope

	// tokenLock guards tokenCache and tokenExpiration.
	tokenLock       sync.Mutex
	tokenCache      string
	tokenExpiration time.Time
}
// Scope is a type which is serializable to a string
// using the allow scope grammar.
type Scope interface {
	String() string
}

// RepositoryScope represents a token scope for access
// to a repository.
type RepositoryScope struct {
	Repository string
	Actions    []string
}

// String returns the string representation of the repository
// using the scope grammar: "repository:<name>:<action>[,<action>...]".
func (rs RepositoryScope) String() string {
	parts := []string{"repository", rs.Repository, strings.Join(rs.Actions, ",")}
	return strings.Join(parts, ":")
}
// TokenHandlerOptions is used to configure a new token handler.
type TokenHandlerOptions struct {
	Transport   http.RoundTripper
	Credentials CredentialStore

	// OfflineAccess requests a refresh token from the token server.
	OfflineAccess bool
	// ForceOAuth forces the OAuth POST path even without a refresh token.
	ForceOAuth bool
	ClientID   string
	Scopes     []Scope
}

// realClock is an implementation of clock for providing real time data.
type realClock struct{}

// Now implements clock.
func (realClock) Now() time.Time { return time.Now() }
// NewTokenHandler creates a new AuthenticationHandler which supports
// fetching tokens from a remote token server, scoped to a single
// repository and set of actions.
func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
	// Create options...
	return NewTokenHandlerWithOptions(TokenHandlerOptions{
		Transport:   transport,
		Credentials: creds,
		Scopes: []Scope{
			RepositoryScope{
				Repository: scope,
				Actions:    actions,
			},
		},
	})
}
// NewTokenHandlerWithOptions creates a new token handler using the provided
// options structure. The handler always uses the real wall clock.
func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
	handler := &tokenHandler{
		transport:     options.Transport,
		creds:         options.Credentials,
		offlineAccess: options.OfflineAccess,
		forceOAuth:    options.ForceOAuth,
		clientID:      options.ClientID,
		scopes:        options.Scopes,
		clock:         realClock{},
	}

	return handler
}
// client returns an HTTP client for talking to the token server. A fresh
// client is built per call around the handler's transport; the 15-second
// timeout bounds the whole token exchange.
func (th *tokenHandler) client() *http.Client {
	return &http.Client{
		Transport: th.transport,
		Timeout:   15 * time.Second,
	}
}

// Scheme returns the auth scheme this handler implements: "bearer".
func (th *tokenHandler) Scheme() string {
	return "bearer"
}
// AuthorizeRequest sets a Bearer token on req, fetching (or reusing a cached)
// token for the handler's configured scopes. A "from" query parameter —
// presumably a cross-repository source — adds a transient pull scope for
// that repository.
func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	var additionalScopes []string
	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
		additionalScopes = append(additionalScopes, RepositoryScope{
			Repository: fromParam,
			Actions:    []string{"pull"},
		}.String())
	}

	token, err := th.getToken(params, additionalScopes...)
	if err != nil {
		return err
	}

	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))

	return nil
}
// getToken returns a bearer token covering the handler's configured scopes
// plus any additionalScopes. Tokens for the configured scopes alone are
// cached until expiration; requests with added scopes always hit the token
// server and never disturb the cache. Safe for concurrent use via tokenLock.
func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
	th.tokenLock.Lock()
	defer th.tokenLock.Unlock()
	scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
	for _, scope := range th.scopes {
		scopes = append(scopes, scope.String())
	}
	var addedScopes bool
	for _, scope := range additionalScopes {
		scopes = append(scopes, scope)
		addedScopes = true
	}
	now := th.clock.Now()
	// Refetch when the cached token has expired or when extra scopes were
	// requested (a cached token would be too narrow).
	if now.After(th.tokenExpiration) || addedScopes {
		token, expiration, err := th.fetchToken(params, scopes)
		if err != nil {
			return "", err
		}
		// do not update cache for added scope tokens
		if !addedScopes {
			th.tokenCache = token
			th.tokenExpiration = expiration
		}
		return token, nil
	}
	return th.tokenCache, nil
}
// postTokenResponse is the body of a successful OAuth2-style POST to the
// token endpoint.
type postTokenResponse struct {
	AccessToken  string    `json:"access_token"`
	RefreshToken string    `json:"refresh_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	Scope        string    `json:"scope"`
}
// fetchTokenWithOAuth fetches a token from the realm with an OAuth2 form
// POST. It authenticates with the given refresh token when present,
// otherwise with basic credentials from the credential store as a
// "password" grant. Returns the token and its computed expiration time.
func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
	form := url.Values{}
	form.Set("scope", strings.Join(scopes, " "))
	form.Set("service", service)

	clientID := th.clientID
	if clientID == "" {
		// Use default client, this is a required field
		clientID = defaultClientID
	}
	form.Set("client_id", clientID)

	if refreshToken != "" {
		form.Set("grant_type", "refresh_token")
		form.Set("refresh_token", refreshToken)
	} else if th.creds != nil {
		form.Set("grant_type", "password")
		username, password := th.creds.Basic(realm)
		form.Set("username", username)
		form.Set("password", password)

		// attempt to get a refresh token
		form.Set("access_type", "offline")
	} else {
		// refuse to do oauth without a grant type
		return "", time.Time{}, fmt.Errorf("no supported grant type")
	}

	resp, err := th.client().PostForm(realm.String(), form)
	if err != nil {
		return "", time.Time{}, err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		err := client.HandleErrorResponse(resp)
		return "", time.Time{}, err
	}

	decoder := json.NewDecoder(resp.Body)

	var tr postTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
	}

	// Fix: guard against a nil credential store before persisting a new
	// refresh token. A refresh token can only be received when one was sent
	// or a password grant was used, but the basic-auth path already checks
	// th.creds != nil before this call; stay consistent and nil-safe here.
	if th.creds != nil && tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
	}

	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
		// issued_at is optional in the token response.
		tr.IssuedAt = th.clock.Now().UTC()
	}

	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
// getTokenResponse is the body of a successful GET to the token endpoint.
// Either "token" or the equivalent "access_token" carries the bearer token.
type getTokenResponse struct {
	Token        string    `json:"token"`
	AccessToken  string    `json:"access_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	RefreshToken string    `json:"refresh_token"`
}
// fetchTokenWithBasicAuth fetches a token via GET on the realm, passing the
// service, scopes, and (optionally) basic credentials. Returns the token and
// its computed expiration time.
func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
	req, err := http.NewRequest("GET", realm.String(), nil)
	if err != nil {
		return "", time.Time{}, err
	}

	reqParams := req.URL.Query()

	if service != "" {
		reqParams.Add("service", service)
	}

	for _, scope := range scopes {
		reqParams.Add("scope", scope)
	}

	if th.offlineAccess {
		// Requesting a refresh token requires identifying the client.
		reqParams.Add("offline_token", "true")
		clientID := th.clientID
		if clientID == "" {
			clientID = defaultClientID
		}
		reqParams.Add("client_id", clientID)
	}

	if th.creds != nil {
		username, password := th.creds.Basic(realm)
		if username != "" && password != "" {
			reqParams.Add("account", username)
			req.SetBasicAuth(username, password)
		}
	}

	req.URL.RawQuery = reqParams.Encode()

	resp, err := th.client().Do(req)
	if err != nil {
		return "", time.Time{}, err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		err := client.HandleErrorResponse(resp)
		return "", time.Time{}, err
	}

	decoder := json.NewDecoder(resp.Body)

	var tr getTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
	}

	if tr.RefreshToken != "" && th.creds != nil {
		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
	}

	// `access_token` is equivalent to `token` and if both are specified
	// the choice is undefined. Canonicalize `access_token` by sticking
	// things in `token`.
	if tr.AccessToken != "" {
		tr.Token = tr.AccessToken
	}

	if tr.Token == "" {
		return "", time.Time{}, errors.New("authorization server did not include a token in the response")
	}

	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
		// issued_at is optional in the token response.
		tr.IssuedAt = th.clock.Now().UTC()
	}

	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
// fetchToken dispatches a token fetch based on the challenge parameters:
// OAuth POST when a refresh token is available or OAuth is forced,
// basic-auth GET otherwise. The realm parameter is required.
func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
	realm, ok := params["realm"]
	if !ok {
		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
	}

	// TODO(dmcgowan): Handle empty scheme and relative realm
	realmURL, err := url.Parse(realm)
	if err != nil {
		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
	}

	service := params["service"]

	var refreshToken string

	if th.creds != nil {
		refreshToken = th.creds.RefreshToken(realmURL, service)
	}

	if refreshToken != "" || th.forceOAuth {
		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
	}

	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
}
// basicHandler implements AuthenticationHandler for the "basic" scheme using
// credentials from a CredentialStore.
type basicHandler struct {
	creds CredentialStore
}

// NewBasicHandler creates a new authentication handler which adds
// basic authentication credentials to a request.
func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
	return &basicHandler{
		creds: creds,
	}
}

// Scheme returns the auth scheme this handler implements: "basic".
func (*basicHandler) Scheme() string {
	return "basic"
}
// AuthorizeRequest applies basic auth credentials for req.URL when the store
// yields a non-empty username and password; otherwise it reports
// ErrNoBasicAuthCredentials. The challenge params are unused.
func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	if bh.creds == nil {
		return ErrNoBasicAuthCredentials
	}
	username, password := bh.creds.Basic(req.URL)
	if username == "" || password == "" {
		return ErrNoBasicAuthCredentials
	}
	req.SetBasicAuth(username, password)
	return nil
}
|
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
// testIndexerTableChecks returns read/write indexer cases for the checks
// table. Each case pairs a query (read) or object (write) with the exact
// radix-tree key bytes expected: lowercased field values, NUL-terminated.
func testIndexerTableChecks() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexNodeService: {
			read: indexValue{
				source: NodeServiceQuery{
					Node:    "NoDe",
					Service: "SeRvIcE",
				},
				expected: []byte("node\x00service\x00"),
			},
			write: indexValue{
				source: &structs.HealthCheck{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00service\x00"),
			},
		},
		indexNode: {
			read: indexValue{
				source: Query{
					Value: "NoDe",
				},
				expected: []byte("node\x00"),
			},
			write: indexValue{
				source: &structs.HealthCheck{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00"),
			},
		},
	}
}
// testIndexerTableMeshTopology returns indexer cases for the mesh-topology
// table: the compound ID index plus the upstream and downstream indexes,
// all lowercasing service names and NUL-terminating each segment.
func testIndexerTableMeshTopology() map[string]indexerTestCase {
	obj := upstreamDownstream{
		Upstream:   structs.ServiceName{Name: "UpStReAm"},
		Downstream: structs.ServiceName{Name: "DownStream"},
	}

	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source: []interface{}{
					structs.ServiceName{Name: "UpStReAm"},
					structs.ServiceName{Name: "DownStream"},
				},
				expected: []byte("upstream\x00downstream\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("upstream\x00downstream\x00"),
			},
		},
		indexUpstream: {
			read: indexValue{
				source:   structs.ServiceName{Name: "UpStReAm"},
				expected: []byte("upstream\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("upstream\x00"),
			},
		},
		indexDownstream: {
			read: indexValue{
				source:   structs.ServiceName{Name: "DownStream"},
				expected: []byte("downstream\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("downstream\x00"),
			},
		},
	}
}
// testIndexerTableGatewayServices returns indexer cases for the
// gateway-services table: the compound ID index (gateway, service, port)
// plus the single-field gateway and service indexes.
func testIndexerTableGatewayServices() map[string]indexerTestCase {
	obj := &structs.GatewayService{
		Gateway: structs.ServiceName{Name: "GateWay"},
		Service: structs.ServiceName{Name: "SerVice"},
		Port:    50123,
	}
	// encodedPort is the index encoding of Port 50123 — presumably the
	// indexer's integer encoding; confirm against the table schema.
	encodedPort := string([]byte{0x96, 0x8f, 0x06, 0, 0, 0, 0, 0, 0, 0})

	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source: []interface{}{
					structs.ServiceName{Name: "GateWay"},
					structs.ServiceName{Name: "SerVice"},
					50123,
				},
				expected: []byte("gateway\x00service\x00" + encodedPort),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("gateway\x00service\x00" + encodedPort),
			},
		},
		indexGateway: {
			read: indexValue{
				source:   structs.ServiceName{Name: "GateWay"},
				expected: []byte("gateway\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("gateway\x00"),
			},
		},
		indexService: {
			read: indexValue{
				source:   structs.ServiceName{Name: "SerVice"},
				expected: []byte("service\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("service\x00"),
			},
		},
	}
}
// testIndexerTableNodes returns indexer cases for the nodes table ID index:
// node names are lowercased and NUL-terminated in the key.
func testIndexerTableNodes() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source:   Query{Value: "NoDeId"},
				expected: []byte("nodeid\x00"),
			},
			write: indexValue{
				source:   &structs.Node{Node: "NoDeId"},
				expected: []byte("nodeid\x00"),
			},
		},
	}
}
// testIndexerTableServices returns indexer cases for the services table.
// NOTE(review): only the node index is covered here; the table's ID index
// has no case yet — TODO add one.
func testIndexerTableServices() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexNode: {
			read: indexValue{
				source: Query{
					Value: "NoDe",
				},
				expected: []byte("node\x00"),
			},
			write: indexValue{
				source: &structs.ServiceNode{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00"),
			},
		},
	}
}
state: add indexer test for services.ID index
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
// testIndexerTableChecks returns read/write indexer cases for the checks
// table. Each case pairs a query (read) or object (write) with the exact
// radix-tree key bytes expected: lowercased field values, NUL-terminated.
func testIndexerTableChecks() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexNodeService: {
			read: indexValue{
				source: NodeServiceQuery{
					Node:    "NoDe",
					Service: "SeRvIcE",
				},
				expected: []byte("node\x00service\x00"),
			},
			write: indexValue{
				source: &structs.HealthCheck{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00service\x00"),
			},
		},
		indexNode: {
			read: indexValue{
				source: Query{
					Value: "NoDe",
				},
				expected: []byte("node\x00"),
			},
			write: indexValue{
				source: &structs.HealthCheck{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00"),
			},
		},
	}
}
// testIndexerTableMeshTopology defines indexer cases for the mesh-topology
// table: the compound id index plus the single upstream and downstream
// indexes, each expected to emit lowercase, null-terminated keys.
func testIndexerTableMeshTopology() map[string]indexerTestCase {
	obj := upstreamDownstream{
		Upstream:   structs.ServiceName{Name: "UpStReAm"},
		Downstream: structs.ServiceName{Name: "DownStream"},
	}
	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source: []interface{}{
					structs.ServiceName{Name: "UpStReAm"},
					structs.ServiceName{Name: "DownStream"},
				},
				expected: []byte("upstream\x00downstream\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("upstream\x00downstream\x00"),
			},
		},
		indexUpstream: {
			read: indexValue{
				source:   structs.ServiceName{Name: "UpStReAm"},
				expected: []byte("upstream\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("upstream\x00"),
			},
		},
		indexDownstream: {
			read: indexValue{
				source:   structs.ServiceName{Name: "DownStream"},
				expected: []byte("downstream\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("downstream\x00"),
			},
		},
	}
}
// testIndexerTableGatewayServices defines indexer cases for the
// gateway-services table: the compound id index (gateway, service, port)
// plus the single gateway and service indexes.
func testIndexerTableGatewayServices() map[string]indexerTestCase {
	obj := &structs.GatewayService{
		Gateway: structs.ServiceName{Name: "GateWay"},
		Service: structs.ServiceName{Name: "SerVice"},
		Port:    50123,
	}
	// encodedPort is the binary form the port indexer is expected to produce
	// for port 50123 — presumably the memdb uint encoding; confirm against
	// the indexer implementation if this test breaks.
	encodedPort := string([]byte{0x96, 0x8f, 0x06, 0, 0, 0, 0, 0, 0, 0})
	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source: []interface{}{
					structs.ServiceName{Name: "GateWay"},
					structs.ServiceName{Name: "SerVice"},
					50123,
				},
				expected: []byte("gateway\x00service\x00" + encodedPort),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("gateway\x00service\x00" + encodedPort),
			},
		},
		indexGateway: {
			read: indexValue{
				source:   structs.ServiceName{Name: "GateWay"},
				expected: []byte("gateway\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("gateway\x00"),
			},
		},
		indexService: {
			read: indexValue{
				source:   structs.ServiceName{Name: "SerVice"},
				expected: []byte("service\x00"),
			},
			write: indexValue{
				source:   obj,
				expected: []byte("service\x00"),
			},
		},
	}
}
// testIndexerTableNodes defines read/write indexer cases for the nodes
// table; the mixed-case node name checks that indexing lowercases input.
func testIndexerTableNodes() map[string]indexerTestCase {
	expected := []byte("nodeid\x00")
	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source:   Query{Value: "NoDeId"},
				expected: expected,
			},
			write: indexValue{
				source:   &structs.Node{Node: "NoDeId"},
				expected: expected,
			},
		},
	}
}
// testIndexerTableServices defines indexer cases for the services table:
// the compound id index (node, service) with prefix cases, and the node
// index. Mixed-case inputs confirm case-folding by the indexers.
func testIndexerTableServices() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source: NodeServiceQuery{
					Node:    "NoDeId",
					Service: "SeRvIcE",
				},
				expected: []byte("nodeid\x00service\x00"),
			},
			write: indexValue{
				source: &structs.ServiceNode{
					Node:      "NoDeId",
					ServiceID: "SeRviCe",
				},
				expected: []byte("nodeid\x00service\x00"),
			},
			// Prefix scans: nil/zero EnterpriseMeta yields no prefix in OSS;
			// a node query prefixes by the lowercased node id.
			prefix: []indexValue{
				{
					source:   (*structs.EnterpriseMeta)(nil),
					expected: nil,
				},
				{
					source:   structs.EnterpriseMeta{},
					expected: nil,
				},
				{
					source:   Query{Value: "NoDeId"},
					expected: []byte("nodeid\x00"),
				},
			},
		},
		indexNode: {
			read: indexValue{
				source: Query{
					Value: "NoDe",
				},
				expected: []byte("node\x00"),
			},
			write: indexValue{
				source: &structs.ServiceNode{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00"),
			},
		},
	}
}
|
package structs
import (
"fmt"
"sort"
"strings"
"github.com/miekg/dns"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/lib/stringslice"
)
// IngressGatewayConfigEntry manages the configuration for an ingress service
// with the given name.
type IngressGatewayConfigEntry struct {
	// Kind of the config entry. This will be set to structs.IngressGateway.
	Kind string
	// Name is used to match the config entry with its associated ingress gateway
	// service. This should match the name provided in the service definition.
	Name string
	// TLS holds the TLS configuration for this gateway.
	TLS GatewayTLSConfig
	// Listeners declares what ports the ingress gateway should listen on, and
	// what services to associated to those ports.
	Listeners []IngressListener
	// Meta is arbitrary user-defined metadata; checked by
	// validateConfigEntryMeta in Validate.
	Meta map[string]string `json:",omitempty"`
	// EnterpriseMeta carries namespace (and related enterprise) scoping;
	// squashed during HCL/mapstructure decoding.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	// RaftIndex embeds the raft index bookkeeping for this entry.
	RaftIndex
}
// IngressListener declares a single port the ingress gateway listens on and
// the services traffic on that port is forwarded to.
type IngressListener struct {
	// Port declares the port on which the ingress gateway should listen for traffic.
	Port int
	// Protocol declares what type of traffic this listener is expected to
	// receive. Depending on the protocol, a listener might support multiplexing
	// services over a single port, or additional discovery chain features. The
	// current supported values are: (tcp | http | http2 | grpc).
	Protocol string
	// Services declares the set of services to which the listener forwards
	// traffic.
	//
	// For "tcp" protocol listeners, only a single service is allowed.
	// For "http" listeners, multiple services can be declared.
	Services []IngressService
}
// IngressService describes one service a listener forwards traffic to,
// optionally restricted to a set of hostnames on L7 protocols.
type IngressService struct {
	// Name declares the service to which traffic should be forwarded.
	//
	// This can either be a specific service, or the wildcard specifier,
	// "*". If the wildcard specifier is provided, the listener must be of "http"
	// protocol and means that the listener will forward traffic to all services.
	//
	// A name can be specified on multiple listeners, and will be exposed on both
	// of the listeners
	Name string
	// Hosts is a list of hostnames which should be associated to this service on
	// the defined listener. Only allowed on layer 7 protocols, this will be used
	// to route traffic to the service by matching the Host header of the HTTP
	// request.
	//
	// If a host is provided for a service that also has a wildcard specifier
	// defined, the host will override the wildcard-specifier-provided
	// "<service-name>.*" domain for that listener.
	//
	// This cannot be specified when using the wildcard specifier, "*", or when
	// using a "tcp" listener.
	Hosts []string
	// Allow HTTP header manipulation to be configured.
	RequestHeaders  *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
	ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`
	// Meta is arbitrary user-defined metadata for this service binding.
	Meta map[string]string `json:",omitempty"`
	// EnterpriseMeta carries namespace scoping; merged with the enclosing
	// entry's meta during Normalize.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
// GatewayTLSConfig holds TLS settings for an ingress gateway.
type GatewayTLSConfig struct {
	// Indicates that TLS should be enabled for this gateway service
	Enabled bool
}
// GetKind returns the config entry kind, always IngressGateway.
func (e *IngressGatewayConfigEntry) GetKind() string {
	return IngressGateway
}
// GetName returns the entry's name; safe to call on a nil receiver.
func (e *IngressGatewayConfigEntry) GetName() string {
	if e == nil {
		return ""
	}
	return e.Name
}
// GetMeta returns the entry's user metadata; safe to call on a nil receiver.
func (e *IngressGatewayConfigEntry) GetMeta() map[string]string {
	if e == nil {
		return nil
	}
	return e.Meta
}
// Normalize canonicalizes the entry in place: sets Kind, normalizes the
// enterprise meta, defaults each listener's protocol to "tcp" and lowercases
// it, and merges/normalizes each service's enterprise meta.
func (e *IngressGatewayConfigEntry) Normalize() error {
	if e == nil {
		return fmt.Errorf("config entry is nil")
	}
	e.Kind = IngressGateway
	e.EnterpriseMeta.Normalize()
	// Mutate listeners through pointers into the slice so the updates are
	// retained (the slice elements are values, not pointers).
	for i := range e.Listeners {
		listener := &e.Listeners[i]
		if listener.Protocol == "" {
			listener.Protocol = "tcp"
		}
		listener.Protocol = strings.ToLower(listener.Protocol)
		for j := range listener.Services {
			svc := &listener.Services[j]
			svc.EnterpriseMeta.Merge(&e.EnterpriseMeta)
			svc.EnterpriseMeta.Normalize()
		}
	}
	return nil
}
// Validate checks the entry for semantic errors without mutating it:
// listener ports must be unique, protocols must be one of tcp/http/http2/
// grpc, and each listener's services must satisfy the per-protocol rules
// below. The first violation found is returned.
func (e *IngressGatewayConfigEntry) Validate() error {
	if err := validateConfigEntryMeta(e.Meta); err != nil {
		return err
	}
	validProtocols := map[string]bool{
		"tcp":   true,
		"http":  true,
		"http2": true,
		"grpc":  true,
	}
	declaredPorts := make(map[int]bool)
	for _, listener := range e.Listeners {
		if _, ok := declaredPorts[listener.Port]; ok {
			return fmt.Errorf("port %d declared on two listeners", listener.Port)
		}
		declaredPorts[listener.Port] = true
		if _, ok := validProtocols[listener.Protocol]; !ok {
			return fmt.Errorf("protocol must be 'tcp', 'http', 'http2', or 'grpc'. '%s' is an unsupported protocol", listener.Protocol)
		}
		if len(listener.Services) == 0 {
			return fmt.Errorf("No service declared for listener with port %d", listener.Port)
		}
		// Validate that http features aren't being used with tcp or another non-supported protocol.
		if listener.Protocol != "http" && len(listener.Services) > 1 {
			return fmt.Errorf("Multiple services per listener are only supported for protocol = 'http' (listener on port %d)",
				listener.Port)
		}
		// Hosts and service IDs must each be unique within a listener.
		declaredHosts := make(map[string]bool)
		serviceNames := make(map[ServiceID]struct{})
		for i, s := range listener.Services {
			if err := validateInnerEnterpriseMeta(&s.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
				return fmt.Errorf("Services[%d].%v", i, err)
			}
			// Header manipulation is only valid for L7 protocols; the
			// modifiers validate themselves against the listener protocol.
			if err := s.RequestHeaders.Validate(listener.Protocol); err != nil {
				return fmt.Errorf("request headers %s (service %q on listener on port %d)", err, s.Name, listener.Port)
			}
			if err := s.ResponseHeaders.Validate(listener.Protocol); err != nil {
				return fmt.Errorf("response headers %s (service %q on listener on port %d)", err, s.Name, listener.Port)
			}
			if listener.Protocol == "tcp" {
				if s.Name == WildcardSpecifier {
					return fmt.Errorf("Wildcard service name is only valid for protocol = 'http' (listener on port %d)", listener.Port)
				}
				if len(s.Hosts) != 0 {
					return fmt.Errorf("Associating hosts to a service is not supported for the %s protocol (listener on port %d)", listener.Protocol, listener.Port)
				}
			}
			if s.Name == "" {
				return fmt.Errorf("Service name cannot be blank (listener on port %d)", listener.Port)
			}
			if s.Name == WildcardSpecifier && len(s.Hosts) != 0 {
				return fmt.Errorf("Associating hosts to a wildcard service is not supported (listener on port %d)", listener.Port)
			}
			if s.NamespaceOrDefault() == WildcardSpecifier {
				return fmt.Errorf("Wildcard namespace is not supported for ingress services (listener on port %d)", listener.Port)
			}
			sid := NewServiceID(s.Name, &s.EnterpriseMeta)
			if _, ok := serviceNames[sid]; ok {
				return fmt.Errorf("Service %s cannot be added multiple times (listener on port %d)", sid, listener.Port)
			}
			serviceNames[sid] = struct{}{}
			for _, h := range s.Hosts {
				if declaredHosts[h] {
					return fmt.Errorf("Hosts must be unique within a specific listener (listener on port %d)", listener.Port)
				}
				declaredHosts[h] = true
				if err := validateHost(e.TLS.Enabled, h); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// validateHost checks that host is acceptable for an ingress listener:
// either the bare wildcard "*" (non-TLS gateways only), or a valid DNS name
// whose only wildcard, if any, is a leading "*." label.
func validateHost(tlsEnabled bool, host string) error {
	// Special case '*' so that non-TLS ingress gateways can use it. This allows
	// an easy demo/testing experience.
	if host == "*" {
		if tlsEnabled {
			return fmt.Errorf("Host '*' is not allowed when TLS is enabled, all hosts must be valid DNS records to add as a DNSSAN")
		}
		return nil
	}
	if _, ok := dns.IsDomainName(host); !ok {
		return fmt.Errorf("Host %q must be a valid DNS hostname", host)
	}
	// Strip an allowed leading "*." label; any '*' remaining after that is
	// a wildcard somewhere other than the leftmost label, which is invalid.
	const wildcardPrefix = "*."
	if strings.ContainsRune(strings.TrimPrefix(host, wildcardPrefix), '*') {
		return fmt.Errorf("Host %q is not valid, a wildcard specifier is only allowed as the leftmost label", host)
	}
	return nil
}
// ListRelatedServices implements discoveryChainConfigEntry
//
// For ingress-gateway config entries this only finds services that are
// explicitly linked in the ingress-gateway config entry. Wildcards will not
// expand to all services.
//
// This function is used during discovery chain graph validation to prevent
// erroneous sets of config entries from being created. Wildcard ingress
// filters out sets with protocol mismatch elsewhere so it isn't an issue here
// that needs fixing.
func (e *IngressGatewayConfigEntry) ListRelatedServices() []ServiceID {
	found := make(map[ServiceID]struct{})
	for _, listener := range e.Listeners {
		for _, service := range listener.Services {
			if service.Name == WildcardSpecifier {
				continue
			}
			svcID := NewServiceID(service.Name, &service.EnterpriseMeta)
			found[svcID] = struct{}{}
		}
	}
	if len(found) == 0 {
		return nil
	}
	out := make([]ServiceID, 0, len(found))
	for svc := range found {
		out = append(out, svc)
	}
	// Order by EnterpriseMeta, then by ID among entries with equal metas.
	// The previous comparator (`metaLess || idLess`) was not a strict weak
	// ordering: for entries with different metas it could report both i<j
	// and j<i, which sort.Slice does not permit and which made the result
	// order unstable.
	sort.Slice(out, func(i, j int) bool {
		if out[i].EnterpriseMeta.LessThan(&out[j].EnterpriseMeta) {
			return true
		}
		if out[j].EnterpriseMeta.LessThan(&out[i].EnterpriseMeta) {
			return false
		}
		return out[i].ID < out[j].ID
	})
	return out
}
// CanRead reports whether authz grants service:read on this gateway's name.
func (e *IngressGatewayConfigEntry) CanRead(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.ServiceRead(e.Name, &authzContext) == acl.Allow
}
// CanWrite reports whether authz grants mesh:write, which is required to
// modify this config entry.
func (e *IngressGatewayConfigEntry) CanWrite(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.MeshWrite(&authzContext) == acl.Allow
}
// GetRaftIndex returns the entry's raft index; a zero-value index is
// returned for a nil receiver so callers need not nil-check.
func (e *IngressGatewayConfigEntry) GetRaftIndex() *RaftIndex {
	if e == nil {
		return &RaftIndex{}
	}
	return &e.RaftIndex
}
// GetEnterpriseMeta returns the entry's enterprise meta; nil for a nil receiver.
func (e *IngressGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta {
	if e == nil {
		return nil
	}
	return &e.EnterpriseMeta
}
// ToServiceName converts this ingress service to a ServiceName scoped by
// its enterprise meta.
func (s *IngressService) ToServiceName() ServiceName {
	return NewServiceName(s.Name, &s.EnterpriseMeta)
}
// TerminatingGatewayConfigEntry manages the configuration for a terminating service
// with the given name.
type TerminatingGatewayConfigEntry struct {
	// Kind of the config entry; set to TerminatingGateway by Normalize.
	Kind string
	// Name matches the entry to its terminating gateway service.
	Name string
	// Services lists the services represented by this gateway.
	Services []LinkedService
	// Meta is arbitrary user-defined metadata.
	Meta map[string]string `json:",omitempty"`
	// EnterpriseMeta carries namespace scoping; squashed during decoding.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	// RaftIndex embeds the raft index bookkeeping for this entry.
	RaftIndex
}
// A LinkedService is a service represented by a terminating gateway
type LinkedService struct {
	// Name is the name of the service, as defined in Consul's catalog
	Name string `json:",omitempty"`
	// CAFile is the optional path to a CA certificate to use for TLS connections
	// from the gateway to the linked service
	CAFile string `json:",omitempty" alias:"ca_file"`
	// CertFile is the optional path to a client certificate to use for TLS connections
	// from the gateway to the linked service
	CertFile string `json:",omitempty" alias:"cert_file"`
	// KeyFile is the optional path to a private key to use for TLS connections
	// from the gateway to the linked service
	KeyFile string `json:",omitempty" alias:"key_file"`
	// SNI is the optional name to specify during the TLS handshake with a linked service
	SNI string `json:",omitempty"`
	// EnterpriseMeta carries namespace scoping; squashed during decoding.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
// GetKind returns the config entry kind, always TerminatingGateway.
func (e *TerminatingGatewayConfigEntry) GetKind() string {
	return TerminatingGateway
}
// GetName returns the entry's name; safe to call on a nil receiver.
func (e *TerminatingGatewayConfigEntry) GetName() string {
	if e == nil {
		return ""
	}
	return e.Name
}
// GetMeta returns the entry's user metadata; safe to call on a nil receiver.
func (e *TerminatingGatewayConfigEntry) GetMeta() map[string]string {
	if e == nil {
		return nil
	}
	return e.Meta
}
// Normalize canonicalizes the entry in place: sets Kind, normalizes the
// enterprise meta, and merges/normalizes each linked service's meta.
func (e *TerminatingGatewayConfigEntry) Normalize() error {
	if e == nil {
		return fmt.Errorf("config entry is nil")
	}
	e.Kind = TerminatingGateway
	e.EnterpriseMeta.Normalize()
	// Index-based mutation so updates to the value elements are retained.
	for i := range e.Services {
		svc := &e.Services[i]
		svc.EnterpriseMeta.Merge(&e.EnterpriseMeta)
		svc.EnterpriseMeta.Normalize()
	}
	return nil
}
// Validate checks the linked services for semantic errors: names must be
// non-blank, wildcard namespaces are rejected, duplicates within the entry
// are rejected, and client TLS file settings must be complete (a CAFile
// alone is allowed for one-way TLS).
func (e *TerminatingGatewayConfigEntry) Validate() error {
	if err := validateConfigEntryMeta(e.Meta); err != nil {
		return err
	}
	seen := make(map[ServiceID]bool)
	for _, svc := range e.Services {
		if svc.Name == "" {
			// No trailing period: matches the style of every other error
			// message in this method (and go vet's error-string convention).
			return fmt.Errorf("Service name cannot be blank")
		}
		ns := svc.NamespaceOrDefault()
		if ns == WildcardSpecifier {
			return fmt.Errorf("Wildcard namespace is not supported for terminating gateway services")
		}
		cid := NewServiceID(svc.Name, &svc.EnterpriseMeta)
		if err := validateInnerEnterpriseMeta(&svc.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
			return fmt.Errorf("Service %q: %v", cid.String(), err)
		}
		// Check for duplicates within the entry
		if ok := seen[cid]; ok {
			return fmt.Errorf("Service %q was specified more than once within a namespace", cid.String())
		}
		seen[cid] = true
		// If either client cert config file was specified then the CA file, client cert, and key file must be specified
		// Specifying only a CAFile is allowed for one-way TLS
		if (svc.CertFile != "" || svc.KeyFile != "") &&
			!(svc.CAFile != "" && svc.CertFile != "" && svc.KeyFile != "") {
			return fmt.Errorf("Service %q must have a CertFile, CAFile, and KeyFile specified for TLS origination", svc.Name)
		}
	}
	return nil
}
// CanRead reports whether authz grants service:read on this gateway's name.
func (e *TerminatingGatewayConfigEntry) CanRead(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.ServiceRead(e.Name, &authzContext) == acl.Allow
}
// CanWrite reports whether authz grants mesh:write, which is required to
// modify this config entry.
func (e *TerminatingGatewayConfigEntry) CanWrite(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.MeshWrite(&authzContext) == acl.Allow
}
// GetRaftIndex returns the entry's raft index; a zero-value index is
// returned for a nil receiver so callers need not nil-check.
func (e *TerminatingGatewayConfigEntry) GetRaftIndex() *RaftIndex {
	if e == nil {
		return &RaftIndex{}
	}
	return &e.RaftIndex
}
// GetEnterpriseMeta returns the entry's enterprise meta; nil for a nil receiver.
func (e *TerminatingGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta {
	if e == nil {
		return nil
	}
	return &e.EnterpriseMeta
}
// GatewayService is used to associate gateways with their linked services.
type GatewayService struct {
	// Gateway and Service name the linked pair.
	Gateway     ServiceName
	Service     ServiceName
	GatewayKind ServiceKind
	// Port/Protocol/Hosts describe how the service is exposed (ingress).
	Port     int      `json:",omitempty"`
	Protocol string   `json:",omitempty"`
	Hosts    []string `json:",omitempty"`
	// CAFile/CertFile/KeyFile/SNI carry TLS origination settings (terminating).
	CAFile   string `json:",omitempty"`
	CertFile string `json:",omitempty"`
	KeyFile  string `json:",omitempty"`
	SNI      string `json:",omitempty"`
	// FromWildcard marks associations created by a wildcard service entry.
	FromWildcard bool `json:",omitempty"`
	// RaftIndex embeds the raft index bookkeeping for this association.
	RaftIndex
}
// GatewayServices is a list of gateway-to-service associations.
type GatewayServices []*GatewayService
// Addresses returns "host:port" strings for this gateway service. A zero
// Port yields nil; when the service has no explicit Hosts, defaultHosts is
// used instead. Trailing DNS dots are trimmed from each host.
func (g *GatewayService) Addresses(defaultHosts []string) []string {
	if g.Port == 0 {
		return nil
	}
	hosts := g.Hosts
	if len(hosts) == 0 {
		hosts = defaultHosts
	}
	// Keep addresses nil (not empty) when there are no hosts at all, to
	// preserve the original nil-slice semantics.
	var addresses []string
	for _, host := range hosts {
		trimmed := strings.TrimRight(host, ".")
		addresses = append(addresses, fmt.Sprintf("%s:%d", trimmed, g.Port))
	}
	return addresses
}
// IsSame reports whether g and o describe the same association with the
// same settings. The RaftIndex is deliberately not compared.
func (g *GatewayService) IsSame(o *GatewayService) bool {
	return g.Gateway.Matches(o.Gateway) &&
		g.Service.Matches(o.Service) &&
		g.GatewayKind == o.GatewayKind &&
		g.Port == o.Port &&
		g.Protocol == o.Protocol &&
		stringslice.Equal(g.Hosts, o.Hosts) &&
		g.CAFile == o.CAFile &&
		g.CertFile == o.CertFile &&
		g.KeyFile == o.KeyFile &&
		g.SNI == o.SNI &&
		g.FromWildcard == o.FromWildcard
}
// Clone returns a copy of g with its own Hosts backing array, so mutations
// of the clone's Hosts do not affect the original.
func (g *GatewayService) Clone() *GatewayService {
	return &GatewayService{
		Gateway:     g.Gateway,
		Service:     g.Service,
		GatewayKind: g.GatewayKind,
		Port:        g.Port,
		Protocol:    g.Protocol,
		// See https://github.com/go101/go101/wiki/How-to-efficiently-clone-a-slice%3F
		Hosts:        append(g.Hosts[:0:0], g.Hosts...),
		CAFile:       g.CAFile,
		CertFile:     g.CertFile,
		KeyFile:      g.KeyFile,
		SNI:          g.SNI,
		FromWildcard: g.FromWildcard,
		RaftIndex:    g.RaftIndex,
	}
}
Include namespace and partition in error messages when validating ingress header manip
package structs
import (
"fmt"
"sort"
"strings"
"github.com/miekg/dns"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/lib/stringslice"
)
// IngressGatewayConfigEntry manages the configuration for an ingress service
// with the given name.
type IngressGatewayConfigEntry struct {
	// Kind of the config entry. This will be set to structs.IngressGateway.
	Kind string
	// Name is used to match the config entry with its associated ingress gateway
	// service. This should match the name provided in the service definition.
	Name string
	// TLS holds the TLS configuration for this gateway.
	TLS GatewayTLSConfig
	// Listeners declares what ports the ingress gateway should listen on, and
	// what services to associated to those ports.
	Listeners []IngressListener
	// Meta is arbitrary user-defined metadata; checked by
	// validateConfigEntryMeta in Validate.
	Meta map[string]string `json:",omitempty"`
	// EnterpriseMeta carries namespace (and related enterprise) scoping;
	// squashed during HCL/mapstructure decoding.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	// RaftIndex embeds the raft index bookkeeping for this entry.
	RaftIndex
}
// IngressListener declares a single port the ingress gateway listens on and
// the services traffic on that port is forwarded to.
type IngressListener struct {
	// Port declares the port on which the ingress gateway should listen for traffic.
	Port int
	// Protocol declares what type of traffic this listener is expected to
	// receive. Depending on the protocol, a listener might support multiplexing
	// services over a single port, or additional discovery chain features. The
	// current supported values are: (tcp | http | http2 | grpc).
	Protocol string
	// Services declares the set of services to which the listener forwards
	// traffic.
	//
	// For "tcp" protocol listeners, only a single service is allowed.
	// For "http" listeners, multiple services can be declared.
	Services []IngressService
}
// IngressService describes one service a listener forwards traffic to,
// optionally restricted to a set of hostnames on L7 protocols.
type IngressService struct {
	// Name declares the service to which traffic should be forwarded.
	//
	// This can either be a specific service, or the wildcard specifier,
	// "*". If the wildcard specifier is provided, the listener must be of "http"
	// protocol and means that the listener will forward traffic to all services.
	//
	// A name can be specified on multiple listeners, and will be exposed on both
	// of the listeners
	Name string
	// Hosts is a list of hostnames which should be associated to this service on
	// the defined listener. Only allowed on layer 7 protocols, this will be used
	// to route traffic to the service by matching the Host header of the HTTP
	// request.
	//
	// If a host is provided for a service that also has a wildcard specifier
	// defined, the host will override the wildcard-specifier-provided
	// "<service-name>.*" domain for that listener.
	//
	// This cannot be specified when using the wildcard specifier, "*", or when
	// using a "tcp" listener.
	Hosts []string
	// Allow HTTP header manipulation to be configured.
	RequestHeaders  *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"`
	ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"`
	// Meta is arbitrary user-defined metadata for this service binding.
	Meta map[string]string `json:",omitempty"`
	// EnterpriseMeta carries namespace scoping; merged with the enclosing
	// entry's meta during Normalize.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
// GatewayTLSConfig holds TLS settings for an ingress gateway.
type GatewayTLSConfig struct {
	// Indicates that TLS should be enabled for this gateway service
	Enabled bool
}
// GetKind returns the config entry kind, always IngressGateway.
func (e *IngressGatewayConfigEntry) GetKind() string {
	return IngressGateway
}
// GetName returns the entry's name; safe to call on a nil receiver.
func (e *IngressGatewayConfigEntry) GetName() string {
	if e == nil {
		return ""
	}
	return e.Name
}
// GetMeta returns the entry's user metadata; safe to call on a nil receiver.
func (e *IngressGatewayConfigEntry) GetMeta() map[string]string {
	if e == nil {
		return nil
	}
	return e.Meta
}
// Normalize canonicalizes the entry in place: sets Kind, normalizes the
// enterprise meta, defaults each listener's protocol to "tcp" and lowercases
// it, and merges/normalizes each service's enterprise meta.
func (e *IngressGatewayConfigEntry) Normalize() error {
	if e == nil {
		return fmt.Errorf("config entry is nil")
	}
	e.Kind = IngressGateway
	e.EnterpriseMeta.Normalize()
	// Mutate listeners through pointers into the slice so the updates are
	// retained (the slice elements are values, not pointers).
	for i := range e.Listeners {
		listener := &e.Listeners[i]
		if listener.Protocol == "" {
			listener.Protocol = "tcp"
		}
		listener.Protocol = strings.ToLower(listener.Protocol)
		for j := range listener.Services {
			svc := &listener.Services[j]
			svc.EnterpriseMeta.Merge(&e.EnterpriseMeta)
			svc.EnterpriseMeta.Normalize()
		}
	}
	return nil
}
// Validate checks the entry for semantic errors without mutating it:
// listener ports must be unique, protocols must be one of tcp/http/http2/
// grpc, and each listener's services must satisfy the per-protocol rules
// below. The first violation found is returned.
func (e *IngressGatewayConfigEntry) Validate() error {
	if err := validateConfigEntryMeta(e.Meta); err != nil {
		return err
	}
	validProtocols := map[string]bool{
		"tcp":   true,
		"http":  true,
		"http2": true,
		"grpc":  true,
	}
	declaredPorts := make(map[int]bool)
	for _, listener := range e.Listeners {
		if _, ok := declaredPorts[listener.Port]; ok {
			return fmt.Errorf("port %d declared on two listeners", listener.Port)
		}
		declaredPorts[listener.Port] = true
		if _, ok := validProtocols[listener.Protocol]; !ok {
			return fmt.Errorf("protocol must be 'tcp', 'http', 'http2', or 'grpc'. '%s' is an unsupported protocol", listener.Protocol)
		}
		if len(listener.Services) == 0 {
			return fmt.Errorf("No service declared for listener with port %d", listener.Port)
		}
		// Validate that http features aren't being used with tcp or another non-supported protocol.
		if listener.Protocol != "http" && len(listener.Services) > 1 {
			return fmt.Errorf("Multiple services per listener are only supported for protocol = 'http' (listener on port %d)",
				listener.Port)
		}
		// Hosts and service IDs must each be unique within a listener.
		declaredHosts := make(map[string]bool)
		serviceNames := make(map[ServiceID]struct{})
		for i, s := range listener.Services {
			if err := validateInnerEnterpriseMeta(&s.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
				return fmt.Errorf("Services[%d].%v", i, err)
			}
			// Fully-qualified name used in header-manipulation error messages
			// so namespace/partition context is visible.
			sn := NewServiceName(s.Name, &s.EnterpriseMeta)
			if err := s.RequestHeaders.Validate(listener.Protocol); err != nil {
				return fmt.Errorf("request headers %s (service %q on listener on port %d)", err, sn.String(), listener.Port)
			}
			if err := s.ResponseHeaders.Validate(listener.Protocol); err != nil {
				return fmt.Errorf("response headers %s (service %q on listener on port %d)", err, sn.String(), listener.Port)
			}
			if listener.Protocol == "tcp" {
				if s.Name == WildcardSpecifier {
					return fmt.Errorf("Wildcard service name is only valid for protocol = 'http' (listener on port %d)", listener.Port)
				}
				if len(s.Hosts) != 0 {
					return fmt.Errorf("Associating hosts to a service is not supported for the %s protocol (listener on port %d)", listener.Protocol, listener.Port)
				}
			}
			if s.Name == "" {
				return fmt.Errorf("Service name cannot be blank (listener on port %d)", listener.Port)
			}
			if s.Name == WildcardSpecifier && len(s.Hosts) != 0 {
				return fmt.Errorf("Associating hosts to a wildcard service is not supported (listener on port %d)", listener.Port)
			}
			if s.NamespaceOrDefault() == WildcardSpecifier {
				return fmt.Errorf("Wildcard namespace is not supported for ingress services (listener on port %d)", listener.Port)
			}
			sid := NewServiceID(s.Name, &s.EnterpriseMeta)
			if _, ok := serviceNames[sid]; ok {
				return fmt.Errorf("Service %s cannot be added multiple times (listener on port %d)", sid, listener.Port)
			}
			serviceNames[sid] = struct{}{}
			for _, h := range s.Hosts {
				if declaredHosts[h] {
					return fmt.Errorf("Hosts must be unique within a specific listener (listener on port %d)", listener.Port)
				}
				declaredHosts[h] = true
				if err := validateHost(e.TLS.Enabled, h); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// validateHost checks that host is acceptable for an ingress listener:
// either the bare wildcard "*" (non-TLS gateways only), or a valid DNS name
// whose only wildcard, if any, is a leading "*." label.
func validateHost(tlsEnabled bool, host string) error {
	// Special case '*' so that non-TLS ingress gateways can use it. This allows
	// an easy demo/testing experience.
	if host == "*" {
		if tlsEnabled {
			return fmt.Errorf("Host '*' is not allowed when TLS is enabled, all hosts must be valid DNS records to add as a DNSSAN")
		}
		return nil
	}
	if _, ok := dns.IsDomainName(host); !ok {
		return fmt.Errorf("Host %q must be a valid DNS hostname", host)
	}
	// Strip an allowed leading "*." label; any '*' remaining after that is
	// a wildcard somewhere other than the leftmost label, which is invalid.
	const wildcardPrefix = "*."
	if strings.ContainsRune(strings.TrimPrefix(host, wildcardPrefix), '*') {
		return fmt.Errorf("Host %q is not valid, a wildcard specifier is only allowed as the leftmost label", host)
	}
	return nil
}
// ListRelatedServices implements discoveryChainConfigEntry
//
// For ingress-gateway config entries this only finds services that are
// explicitly linked in the ingress-gateway config entry. Wildcards will not
// expand to all services.
//
// This function is used during discovery chain graph validation to prevent
// erroneous sets of config entries from being created. Wildcard ingress
// filters out sets with protocol mismatch elsewhere so it isn't an issue here
// that needs fixing.
func (e *IngressGatewayConfigEntry) ListRelatedServices() []ServiceID {
	found := make(map[ServiceID]struct{})
	for _, listener := range e.Listeners {
		for _, service := range listener.Services {
			if service.Name == WildcardSpecifier {
				continue
			}
			svcID := NewServiceID(service.Name, &service.EnterpriseMeta)
			found[svcID] = struct{}{}
		}
	}
	if len(found) == 0 {
		return nil
	}
	out := make([]ServiceID, 0, len(found))
	for svc := range found {
		out = append(out, svc)
	}
	// Order by EnterpriseMeta, then by ID among entries with equal metas.
	// The previous comparator (`metaLess || idLess`) was not a strict weak
	// ordering: for entries with different metas it could report both i<j
	// and j<i, which sort.Slice does not permit and which made the result
	// order unstable.
	sort.Slice(out, func(i, j int) bool {
		if out[i].EnterpriseMeta.LessThan(&out[j].EnterpriseMeta) {
			return true
		}
		if out[j].EnterpriseMeta.LessThan(&out[i].EnterpriseMeta) {
			return false
		}
		return out[i].ID < out[j].ID
	})
	return out
}
// CanRead reports whether authz grants service:read on this gateway's name.
func (e *IngressGatewayConfigEntry) CanRead(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.ServiceRead(e.Name, &authzContext) == acl.Allow
}
// CanWrite reports whether authz grants mesh:write, which is required to
// modify this config entry.
func (e *IngressGatewayConfigEntry) CanWrite(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.MeshWrite(&authzContext) == acl.Allow
}
// GetRaftIndex returns the entry's raft index; a zero-value index is
// returned for a nil receiver so callers need not nil-check.
func (e *IngressGatewayConfigEntry) GetRaftIndex() *RaftIndex {
	if e == nil {
		return &RaftIndex{}
	}
	return &e.RaftIndex
}
// GetEnterpriseMeta returns the entry's enterprise meta; nil for a nil receiver.
func (e *IngressGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta {
	if e == nil {
		return nil
	}
	return &e.EnterpriseMeta
}
// ToServiceName converts this ingress service to a ServiceName scoped by
// its enterprise meta.
func (s *IngressService) ToServiceName() ServiceName {
	return NewServiceName(s.Name, &s.EnterpriseMeta)
}
// TerminatingGatewayConfigEntry manages the configuration for a terminating service
// with the given name.
type TerminatingGatewayConfigEntry struct {
	// Kind of the config entry; set to TerminatingGateway by Normalize.
	Kind string
	// Name matches the entry to its terminating gateway service.
	Name string
	// Services lists the services represented by this gateway.
	Services []LinkedService
	// Meta is arbitrary user-defined metadata.
	Meta map[string]string `json:",omitempty"`
	// EnterpriseMeta carries namespace scoping; squashed during decoding.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	// RaftIndex embeds the raft index bookkeeping for this entry.
	RaftIndex
}
// A LinkedService is a service represented by a terminating gateway
type LinkedService struct {
	// Name is the name of the service, as defined in Consul's catalog
	Name string `json:",omitempty"`
	// CAFile is the optional path to a CA certificate to use for TLS connections
	// from the gateway to the linked service
	CAFile string `json:",omitempty" alias:"ca_file"`
	// CertFile is the optional path to a client certificate to use for TLS connections
	// from the gateway to the linked service
	CertFile string `json:",omitempty" alias:"cert_file"`
	// KeyFile is the optional path to a private key to use for TLS connections
	// from the gateway to the linked service
	KeyFile string `json:",omitempty" alias:"key_file"`
	// SNI is the optional name to specify during the TLS handshake with a linked service
	SNI string `json:",omitempty"`
	// EnterpriseMeta carries namespace scoping; squashed during decoding.
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
// GetKind returns the config entry kind, always TerminatingGateway.
func (e *TerminatingGatewayConfigEntry) GetKind() string {
	return TerminatingGateway
}
// GetName returns the entry's name; safe to call on a nil receiver.
func (e *TerminatingGatewayConfigEntry) GetName() string {
	if e == nil {
		return ""
	}
	return e.Name
}
// GetMeta returns the entry's user metadata; safe to call on a nil receiver.
func (e *TerminatingGatewayConfigEntry) GetMeta() map[string]string {
	if e == nil {
		return nil
	}
	return e.Meta
}
// Normalize canonicalizes the entry in place: sets Kind, normalizes the
// enterprise meta, and merges/normalizes each linked service's meta.
func (e *TerminatingGatewayConfigEntry) Normalize() error {
	if e == nil {
		return fmt.Errorf("config entry is nil")
	}
	e.Kind = TerminatingGateway
	e.EnterpriseMeta.Normalize()
	// Index-based mutation so updates to the value elements are retained.
	for i := range e.Services {
		svc := &e.Services[i]
		svc.EnterpriseMeta.Merge(&e.EnterpriseMeta)
		svc.EnterpriseMeta.Normalize()
	}
	return nil
}
// Validate checks the linked services for semantic errors: names must be
// non-blank, wildcard namespaces are rejected, duplicates within the entry
// are rejected, and client TLS file settings must be complete (a CAFile
// alone is allowed for one-way TLS).
func (e *TerminatingGatewayConfigEntry) Validate() error {
	if err := validateConfigEntryMeta(e.Meta); err != nil {
		return err
	}
	seen := make(map[ServiceID]bool)
	for _, svc := range e.Services {
		if svc.Name == "" {
			// No trailing period: matches the style of every other error
			// message in this method (and go vet's error-string convention).
			return fmt.Errorf("Service name cannot be blank")
		}
		ns := svc.NamespaceOrDefault()
		if ns == WildcardSpecifier {
			return fmt.Errorf("Wildcard namespace is not supported for terminating gateway services")
		}
		cid := NewServiceID(svc.Name, &svc.EnterpriseMeta)
		if err := validateInnerEnterpriseMeta(&svc.EnterpriseMeta, &e.EnterpriseMeta); err != nil {
			return fmt.Errorf("Service %q: %v", cid.String(), err)
		}
		// Check for duplicates within the entry
		if ok := seen[cid]; ok {
			return fmt.Errorf("Service %q was specified more than once within a namespace", cid.String())
		}
		seen[cid] = true
		// If either client cert config file was specified then the CA file, client cert, and key file must be specified
		// Specifying only a CAFile is allowed for one-way TLS
		if (svc.CertFile != "" || svc.KeyFile != "") &&
			!(svc.CAFile != "" && svc.CertFile != "" && svc.KeyFile != "") {
			return fmt.Errorf("Service %q must have a CertFile, CAFile, and KeyFile specified for TLS origination", svc.Name)
		}
	}
	return nil
}
// CanRead reports whether authz grants service:read on this gateway's name.
func (e *TerminatingGatewayConfigEntry) CanRead(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.ServiceRead(e.Name, &authzContext) == acl.Allow
}
// CanWrite reports whether authz grants mesh:write, which is required to
// modify this config entry.
func (e *TerminatingGatewayConfigEntry) CanWrite(authz acl.Authorizer) bool {
	var authzContext acl.AuthorizerContext
	e.FillAuthzContext(&authzContext)
	return authz.MeshWrite(&authzContext) == acl.Allow
}
// GetRaftIndex returns the entry's raft index; a zero-value index is
// returned for a nil receiver so callers need not nil-check.
func (e *TerminatingGatewayConfigEntry) GetRaftIndex() *RaftIndex {
	if e == nil {
		return &RaftIndex{}
	}
	return &e.RaftIndex
}
// GetEnterpriseMeta returns the entry's enterprise meta; nil for a nil receiver.
func (e *TerminatingGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta {
	if e == nil {
		return nil
	}
	return &e.EnterpriseMeta
}
// GatewayService is used to associate gateways with their linked services.
type GatewayService struct {
	// Gateway and Service name the linked pair.
	Gateway     ServiceName
	Service     ServiceName
	GatewayKind ServiceKind
	// Port/Protocol/Hosts describe how the service is exposed (ingress).
	Port     int      `json:",omitempty"`
	Protocol string   `json:",omitempty"`
	Hosts    []string `json:",omitempty"`
	// CAFile/CertFile/KeyFile/SNI carry TLS origination settings (terminating).
	CAFile   string `json:",omitempty"`
	CertFile string `json:",omitempty"`
	KeyFile  string `json:",omitempty"`
	SNI      string `json:",omitempty"`
	// FromWildcard marks associations created by a wildcard service entry.
	FromWildcard bool `json:",omitempty"`
	// RaftIndex embeds the raft index bookkeeping for this association.
	RaftIndex
}
// GatewayServices is a list of gateway-to-service associations.
type GatewayServices []*GatewayService
// Addresses returns "host:port" strings for this gateway service. A zero
// Port yields nil; when the service has no explicit Hosts, defaultHosts is
// used instead. Trailing DNS dots are trimmed from each host.
func (g *GatewayService) Addresses(defaultHosts []string) []string {
	if g.Port == 0 {
		return nil
	}
	hosts := g.Hosts
	if len(hosts) == 0 {
		hosts = defaultHosts
	}
	// Keep addresses nil (not empty) when there are no hosts at all, to
	// preserve the original nil-slice semantics.
	var addresses []string
	for _, host := range hosts {
		trimmed := strings.TrimRight(host, ".")
		addresses = append(addresses, fmt.Sprintf("%s:%d", trimmed, g.Port))
	}
	return addresses
}
// IsSame reports whether g and o describe the same gateway/service
// association with identical configuration.
func (g *GatewayService) IsSame(o *GatewayService) bool {
	if !g.Gateway.Matches(o.Gateway) || !g.Service.Matches(o.Service) {
		return false
	}
	if g.GatewayKind != o.GatewayKind || g.Port != o.Port || g.Protocol != o.Protocol {
		return false
	}
	if !stringslice.Equal(g.Hosts, o.Hosts) {
		return false
	}
	return g.CAFile == o.CAFile &&
		g.CertFile == o.CertFile &&
		g.KeyFile == o.KeyFile &&
		g.SNI == o.SNI &&
		g.FromWildcard == o.FromWildcard
}
// Clone returns a copy of the GatewayService whose Hosts slice shares no
// backing storage with the original.
func (g *GatewayService) Clone() *GatewayService {
	clone := *g
	// Detach Hosts from the original's backing array.
	// See https://github.com/go101/go101/wiki/How-to-efficiently-clone-a-slice%3F
	clone.Hosts = append(g.Hosts[:0:0], g.Hosts...)
	return &clone
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package csp
import (
"context"
"crypto/rand"
"encoding/base64"
"strings"
"github.com/google/go-safeweb/safehttp"
)
// randReader is the source of cryptographic randomness for nonce
// generation; a package variable so tests can substitute a fake.
var randReader = rand.Reader

// nonceSize is the size of the nonces in bytes.
const nonceSize = 20
// generateNonce returns nonceSize bytes of cryptographic randomness encoded
// as a standard base64 string, for use in nonce-based CSP directives.
func generateNonce() string {
	b := make([]byte, nonceSize)
	_, err := randReader.Read(b)
	if err != nil {
		// A failure to read randomness means no secure nonce can be
		// produced; aborting is safer than serving a guessable value.
		// TODO: handle this better, what should happen here?
		panic(err)
	}
	return base64.StdEncoding.EncodeToString(b)
}
// Policy defines a CSP policy.
type Policy struct {
	// reportOnly selects the Content-Security-Policy-Report-Only header
	// instead of the enforcing Content-Security-Policy header.
	reportOnly bool
	// serialize serializes this policy for use in a Content-Security-Policy
	// header or in a Content-Security-Policy-Report-Only header. The
	// per-request nonce is always passed in; serializers that don't need
	// it may ignore it. (There is no needsNonce field — an earlier version
	// of this comment referred to one.)
	serialize func(nonce string) string
}
// ctxKey is the unexported context key under which the per-request CSP
// nonce is stored; its private type prevents collisions with other packages.
type ctxKey struct{}
// Nonce retrieves the nonce from the given context. If there is no nonce
// stored in the context, an empty string is returned.
func Nonce(ctx context.Context) string {
	if nonce, ok := ctx.Value(ctxKey{}).(string); ok {
		return nonce
	}
	return ""
}
// StrictCSPBuilder can be used to build a strict, nonce-based CSP.
// See https://csp.withgoogle.com/docs/strict-csp.html for more info.
//
// If BaseURI is an empty string the base-uri directive will be set to 'none'.
type StrictCSPBuilder struct {
	// ReportOnly controls whether this policy is set as a
	// Content-Security-Policy header or a
	// Content-Security-Policy-Report-Only header.
	ReportOnly bool
	// StrictDynamic controls whether script-src includes 'strict-dynamic'.
	StrictDynamic bool
	// UnsafeEval controls whether script-src includes 'unsafe-eval',
	// allowing the JavaScript eval() function.
	UnsafeEval bool
	// BaseURI controls the base-uri directive, which restricts the URLs
	// usable in a document's <base> element. Empty means 'none'.
	BaseURI string
	// ReportURI controls the report-uri directive. If empty, no report-uri
	// directive is emitted.
	ReportURI string
}
// Build creates a Policy based on the specified options.
func (s StrictCSPBuilder) Build() Policy {
	return Policy{
		reportOnly: s.ReportOnly,
		serialize: func(nonce string) string {
			// script-src carries the per-request nonce plus fallbacks
			// for browsers that don't understand nonces.
			script := "script-src 'unsafe-inline' https: http: 'nonce-" + nonce + "'"
			if s.StrictDynamic {
				script += " 'strict-dynamic'"
			}
			if s.UnsafeEval {
				script += " 'unsafe-eval'"
			}

			base := s.BaseURI
			if base == "" {
				base = "'none'"
			}

			directives := []string{"object-src 'none'", script, "base-uri " + base}
			if s.ReportURI != "" {
				directives = append(directives, "report-uri "+s.ReportURI)
			}
			return strings.Join(directives, "; ")
		},
	}
}
// FramingPolicyBuilder can be used to create a new CSP policy with frame-ancestors
// set to 'self'.
//
// TODO: allow relaxation on specific endpoints according to #77.
type FramingPolicyBuilder struct {
	// ReportOnly controls whether this policy is set as a
	// Content-Security-Policy header or a
	// Content-Security-Policy-Report-Only header.
	ReportOnly bool
	// ReportURI controls the report-uri directive. If empty, no report-uri
	// directive is emitted.
	ReportURI string
}
// Build creates a Policy based on the specified options.
func (f FramingPolicyBuilder) Build() Policy {
	return Policy{
		reportOnly: f.ReportOnly,
		// The framing policy never uses the per-request nonce.
		serialize: func(_ string) string {
			policy := "frame-ancestors 'self'"
			if f.ReportURI != "" {
				policy += "; report-uri " + f.ReportURI
			}
			return policy
		},
	}
}
// Interceptor intercepts requests and applies CSP policies.
type Interceptor struct {
	// Policies holds the policies applied to every request; each is
	// serialized with the same per-request nonce.
	Policies []Policy
}
// NewInterceptor creates an interceptor from the provided policies.
// The variadic slice is stored directly, not copied.
func NewInterceptor(p ...Policy) Interceptor {
	return Interceptor{Policies: p}
}
// Default creates a new CSP interceptor with a strict nonce-based policy and a
// framing policy, both in enforcement mode.
func Default(reportURI string) Interceptor {
	strict := StrictCSPBuilder{ReportURI: reportURI}.Build()
	framing := FramingPolicyBuilder{ReportURI: reportURI}.Build()
	return NewInterceptor(strict, framing)
}
// Before claims and sets the Content-Security-Policy header and the
// Content-Security-Policy-Report-Only header.
//
// A fresh nonce is generated per request and stashed in the request context
// so handlers can retrieve it via Nonce().
func (it Interceptor) Before(w safehttp.ResponseWriter, r *safehttp.IncomingRequest) safehttp.Result {
	var csps []string
	var reportCsps []string
	// Every policy applied to this request shares one nonce.
	nonce := generateNonce()
	r.SetContext(context.WithValue(r.Context(), ctxKey{}, nonce))
	// Partition serialized policies by enforcement mode.
	for _, p := range it.Policies {
		v := p.serialize(nonce)
		if p.reportOnly {
			reportCsps = append(reportCsps, v)
		} else {
			csps = append(csps, v)
		}
	}
	// Claim both headers so no later stage can overwrite them.
	h := w.Header()
	setCSP, err := h.Claim("Content-Security-Policy")
	if err != nil {
		return w.ServerError(safehttp.StatusInternalServerError)
	}
	setCSP(csps)
	setCSPReportOnly, err := h.Claim("Content-Security-Policy-Report-Only")
	if err != nil {
		return w.ServerError(safehttp.StatusInternalServerError)
	}
	setCSPReportOnly(reportCsps)
	return safehttp.Result{}
}
Added descriptions of fields in FramingPolicyBuilder and StrictCSPBuilder.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package csp
import (
"context"
"crypto/rand"
"encoding/base64"
"strings"
"github.com/google/go-safeweb/safehttp"
)
// randReader is the source of cryptographic randomness for nonce
// generation; a package variable so tests can substitute a fake.
var randReader = rand.Reader

// nonceSize is the size of the nonces in bytes.
const nonceSize = 20
// generateNonce returns nonceSize bytes of cryptographic randomness encoded
// as a standard base64 string, for use in nonce-based CSP directives.
func generateNonce() string {
	b := make([]byte, nonceSize)
	_, err := randReader.Read(b)
	if err != nil {
		// A failure to read randomness means no secure nonce can be
		// produced; aborting is safer than serving a guessable value.
		// TODO: handle this better, what should happen here?
		panic(err)
	}
	return base64.StdEncoding.EncodeToString(b)
}
// Policy defines a CSP policy.
type Policy struct {
	// reportOnly selects the Content-Security-Policy-Report-Only header
	// instead of the enforcing Content-Security-Policy header.
	reportOnly bool
	// serialize serializes this policy for use in a Content-Security-Policy
	// header or in a Content-Security-Policy-Report-Only header. The
	// per-request nonce is always passed in; serializers that don't need
	// it may ignore it. (There is no needsNonce field — an earlier version
	// of this comment referred to one.)
	serialize func(nonce string) string
}
// ctxKey is the unexported context key under which the per-request CSP
// nonce is stored; its private type prevents collisions with other packages.
type ctxKey struct{}
// Nonce retrieves the nonce from the given context. If there is no nonce
// stored in the context, an empty string is returned.
func Nonce(ctx context.Context) string {
	if nonce, ok := ctx.Value(ctxKey{}).(string); ok {
		return nonce
	}
	return ""
}
// StrictCSPBuilder can be used to build a strict, nonce-based CSP.
//
// See https://csp.withgoogle.com/docs/strict-csp.html for more info.
type StrictCSPBuilder struct {
	// ReportOnly controls whether this policy should be set as a Content-Security-Policy
	// header or a Content-Security-Policy-Report-Only header.
	ReportOnly bool
	// StrictDynamic controls whether script-src should contain the 'strict-dynamic'
	// value.
	//
	// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src#strict-dynamic
	// for more info.
	StrictDynamic bool
	// UnsafeEval controls whether script-src should contain the 'unsafe-eval' value.
	// If enabled, the eval() JavaScript function is allowed.
	UnsafeEval bool
	// BaseURI controls the base-uri directive. If BaseURI is an empty string the
	// directive will be set to 'none'. The base-uri directive restricts the URLs
	// which can be used in a document's <base> element.
	//
	// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/base-uri
	// for more info.
	BaseURI string
	// ReportURI controls the report-uri directive. If ReportURI is empty, no report-uri
	// directive will be set.
	ReportURI string
}
// Build creates a Policy based on the specified options.
func (s StrictCSPBuilder) Build() Policy {
	return Policy{
		reportOnly: s.ReportOnly,
		serialize: func(nonce string) string {
			// script-src carries the per-request nonce plus fallbacks
			// for browsers that don't understand nonces.
			script := "script-src 'unsafe-inline' https: http: 'nonce-" + nonce + "'"
			if s.StrictDynamic {
				script += " 'strict-dynamic'"
			}
			if s.UnsafeEval {
				script += " 'unsafe-eval'"
			}

			base := s.BaseURI
			if base == "" {
				base = "'none'"
			}

			directives := []string{"object-src 'none'", script, "base-uri " + base}
			if s.ReportURI != "" {
				directives = append(directives, "report-uri "+s.ReportURI)
			}
			return strings.Join(directives, "; ")
		},
	}
}
// FramingPolicyBuilder can be used to create a new CSP policy with frame-ancestors
// set to 'self'.
//
// TODO: allow relaxation on specific endpoints according to #77.
type FramingPolicyBuilder struct {
	// ReportOnly controls whether this policy should be set as a Content-Security-Policy
	// header or a Content-Security-Policy-Report-Only header.
	ReportOnly bool
	// ReportURI controls the report-uri directive. If ReportURI is empty, no report-uri
	// directive will be set.
	ReportURI string
}
// Build creates a Policy based on the specified options.
func (f FramingPolicyBuilder) Build() Policy {
	return Policy{
		reportOnly: f.ReportOnly,
		// The framing policy never uses the per-request nonce.
		serialize: func(_ string) string {
			policy := "frame-ancestors 'self'"
			if f.ReportURI != "" {
				policy += "; report-uri " + f.ReportURI
			}
			return policy
		},
	}
}
// Interceptor intercepts requests and applies CSP policies.
type Interceptor struct {
	// Policies holds the policies applied to every request; each is
	// serialized with the same per-request nonce.
	Policies []Policy
}
// NewInterceptor creates an interceptor from the provided policies.
// The variadic slice is stored directly, not copied.
func NewInterceptor(p ...Policy) Interceptor {
	return Interceptor{Policies: p}
}
// Default creates a new CSP interceptor with a strict nonce-based policy and a
// framing policy, both in enforcement mode.
func Default(reportURI string) Interceptor {
	strict := StrictCSPBuilder{ReportURI: reportURI}.Build()
	framing := FramingPolicyBuilder{ReportURI: reportURI}.Build()
	return NewInterceptor(strict, framing)
}
// Before claims and sets the Content-Security-Policy header and the
// Content-Security-Policy-Report-Only header.
//
// A fresh nonce is generated per request and stashed in the request context
// so handlers can retrieve it via Nonce().
func (it Interceptor) Before(w safehttp.ResponseWriter, r *safehttp.IncomingRequest) safehttp.Result {
	var csps []string
	var reportCsps []string
	// Every policy applied to this request shares one nonce.
	nonce := generateNonce()
	r.SetContext(context.WithValue(r.Context(), ctxKey{}, nonce))
	// Partition serialized policies by enforcement mode.
	for _, p := range it.Policies {
		v := p.serialize(nonce)
		if p.reportOnly {
			reportCsps = append(reportCsps, v)
		} else {
			csps = append(csps, v)
		}
	}
	// Claim both headers so no later stage can overwrite them.
	h := w.Header()
	setCSP, err := h.Claim("Content-Security-Policy")
	if err != nil {
		return w.ServerError(safehttp.StatusInternalServerError)
	}
	setCSP(csps)
	setCSPReportOnly, err := h.Claim("Content-Security-Policy-Report-Only")
	if err != nil {
		return w.ServerError(safehttp.StatusInternalServerError)
	}
	setCSPReportOnly(reportCsps)
	return safehttp.Result{}
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memfs_test
import (
"io/ioutil"
"log"
"os"
"os/user"
"path"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/jacobsa/fuse"
"github.com/jacobsa/fuse/samples/memfs"
"github.com/jacobsa/gcsfuse/timeutil"
. "github.com/jacobsa/oglematchers"
. "github.com/jacobsa/ogletest"
"golang.org/x/net/context"
)
// TestMemFS runs the ogletest suites registered in this file.
func TestMemFS(t *testing.T) { RunTests(t) }
////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////
// currentUid returns the UID of the user running the test, for comparison
// against inode ownership reported by the file system. It panics on failure
// since the tests cannot proceed without it.
func currentUid() uint32 {
	// Use a short local name: the original "user" shadowed the imported
	// os/user package.
	u, err := user.Current()
	if err != nil {
		panic(err)
	}

	// Uid is textual; it must parse as an unsigned 32-bit integer.
	uid, err := strconv.ParseUint(u.Uid, 10, 32)
	if err != nil {
		panic(err)
	}

	return uint32(uid)
}
// currentGid returns the GID of the user running the test, for comparison
// against inode ownership reported by the file system. It panics on failure
// since the tests cannot proceed without it.
func currentGid() uint32 {
	// Use a short local name: the original "user" shadowed the imported
	// os/user package.
	u, err := user.Current()
	if err != nil {
		panic(err)
	}

	// Gid is textual; it must parse as an unsigned 32-bit integer.
	gid, err := strconv.ParseUint(u.Gid, 10, 32)
	if err != nil {
		panic(err)
	}

	return uint32(gid)
}
// timespecToTime converts a syscall.Timespec (as found in syscall.Stat_t)
// to a time.Time.
func timespecToTime(ts syscall.Timespec) time.Time {
	return time.Unix(ts.Sec, ts.Nsec)
}
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
// MemFSTest mounts an in-memory file system backed by a simulated clock and
// exercises it through ordinary OS file operations.
type MemFSTest struct {
	clock timeutil.SimulatedClock
	mfs   *fuse.MountedFileSystem
}

// Compile-time checks that the suite hooks are implemented.
var _ SetUpInterface = &MemFSTest{}
var _ TearDownInterface = &MemFSTest{}

func init() { RegisterTestSuite(&MemFSTest{}) }
// SetUp mounts a fresh in-memory file system at a temporary directory
// before each test method runs.
func (t *MemFSTest) SetUp(ti *TestInfo) {
	var err error

	// Set up a fixed, non-zero time.
	t.clock.SetTime(time.Now())

	// Set up a temporary directory for mounting.
	mountPoint, err := ioutil.TempDir("", "memfs_test")
	if err != nil {
		panic("ioutil.TempDir: " + err.Error())
	}

	// Mount a file system backed by the simulated clock.
	fs := memfs.NewMemFS(&t.clock)
	if t.mfs, err = fuse.Mount(mountPoint, fs); err != nil {
		panic("Mount: " + err.Error())
	}

	// Block until the mount is usable.
	if err = t.mfs.WaitForReady(context.Background()); err != nil {
		panic("MountedFileSystem.WaitForReady: " + err.Error())
	}
}
// TearDown unmounts the file system after each test, retrying with
// exponential backoff while the mount point is still busy, then waits for
// the mount to wind down.
func (t *MemFSTest) TearDown() {
	// Unmount the file system. Try again on "resource busy" errors.
	delay := 10 * time.Millisecond
	for {
		err := t.mfs.Unmount()
		if err == nil {
			break
		}

		if strings.Contains(err.Error(), "resource busy") {
			log.Println("Resource busy error while unmounting; trying again")
			time.Sleep(delay)
			// NOTE(review): the backoff grows without bound — a wedged
			// mount retries forever; consider capping retries.
			delay = time.Duration(1.3 * float64(delay))
			continue
		}

		// Any other error is unexpected and fatal.
		panic("MountedFileSystem.Unmount: " + err.Error())
	}

	if err := t.mfs.Join(context.Background()); err != nil {
		panic("MountedFileSystem.Join: " + err.Error())
	}
}
////////////////////////////////////////////////////////////////////////
// Test functions
////////////////////////////////////////////////////////////////////////
// ContentsOfEmptyFileSystem checks that a freshly-mounted file system has an
// empty root directory.
func (t *MemFSTest) ContentsOfEmptyFileSystem() {
	entries, err := ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}
// Mkdir_OneLevel creates a directory directly under the mount root and
// verifies its metadata, contents, and presence in the root listing.
func (t *MemFSTest) Mkdir_OneLevel() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t
	var entries []os.FileInfo

	dirName := path.Join(t.mfs.Dir(), "dir")

	// Create a directory within the root.
	createTime := t.clock.Now()
	err = os.Mkdir(dirName, 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the directory. Check err before touching fi: on failure fi is
	// nil and fi.Sys() would panic instead of reporting the real error.
	fi, err = os.Stat(dirName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(os.ModeDir|0754, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectTrue(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(0, stat.Size)
	ExpectEq(0, timespecToTime(stat.Atimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Ctimespec).Sub(createTime))

	// Read the directory.
	entries, err = ioutil.ReadDir(dirName)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Read the root.
	entries, err = ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("dir", fi.Name())
	ExpectEq(os.ModeDir|0754, fi.Mode())
}
// Mkdir_TwoLevels creates a directory inside another directory and verifies
// the child's metadata, contents, and the parent's listing.
func (t *MemFSTest) Mkdir_TwoLevels() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t
	var entries []os.FileInfo

	// Create a directory within the root.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0700)
	AssertEq(nil, err)

	// Create a child of that directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the directory. Check err before touching fi: on failure fi is
	// nil and fi.Sys() would panic instead of reporting the real error.
	fi, err = os.Stat(path.Join(t.mfs.Dir(), "parent/dir"))
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(os.ModeDir|0754, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectTrue(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(0, stat.Size)
	ExpectEq(0, timespecToTime(stat.Atimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Ctimespec).Sub(createTime))

	// Read the directory.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent/dir"))
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Read the parent.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent"))
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("dir", fi.Name())
	ExpectEq(os.ModeDir|0754, fi.Mode())
}
// Mkdir_AlreadyExists verifies that creating a directory that already exists
// fails with an "exists" error.
func (t *MemFSTest) Mkdir_AlreadyExists() {
	var err error
	dirName := path.Join(t.mfs.Dir(), "dir")

	// Create the directory once.
	err = os.Mkdir(dirName, 0754)
	AssertEq(nil, err)

	// Attempt to create it again.
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("exists")))
}
// Mkdir_IntermediateIsFile verifies that mkdir fails with ENOTDIR when a
// path component is a regular file.
func (t *MemFSTest) Mkdir_IntermediateIsFile() {
	var err error

	// Create a file.
	fileName := path.Join(t.mfs.Dir(), "foo")
	err = ioutil.WriteFile(fileName, []byte{}, 0700)
	AssertEq(nil, err)

	// Attempt to create a directory within the file.
	dirName := path.Join(fileName, "dir")
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("not a directory")))
}
// Mkdir_IntermediateIsNonExistent verifies that mkdir fails with ENOENT when
// a parent path component does not exist.
func (t *MemFSTest) Mkdir_IntermediateIsNonExistent() {
	var err error

	// Attempt to create a sub-directory of a non-existent sub-directory.
	dirName := path.Join(t.mfs.Dir(), "foo/dir")
	err = os.Mkdir(dirName, 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("no such file or directory")))
}
// Mkdir_PermissionDenied verifies that mkdir fails inside a directory that
// lacks write permission.
// NOTE(review): this presumably fails when run as root, which bypasses
// permission checks — confirm how the suite is expected to be run.
func (t *MemFSTest) Mkdir_PermissionDenied() {
	var err error

	// Create a directory within the root without write permissions.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0500)
	AssertEq(nil, err)

	// Attempt to create a child of that directory.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("permission denied")))
}
// The following cases are placeholders; each fails loudly so the missing
// coverage stays visible rather than being silently skipped.

func (t *MemFSTest) CreateNewFile_InRoot() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) CreateNewFile_InSubDir() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) ModifyExistingFile_InRoot() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) ModifyExistingFile_InSubDir() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) UnlinkFile_Exists() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) UnlinkFile_NotAFile() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) UnlinkFile_NonExistent() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) UnlinkFile_StillOpen() {
	AssertTrue(false, "TODO")
}
// Rmdir_NonEmpty verifies that removing a non-empty directory fails with a
// "not empty" error.
func (t *MemFSTest) Rmdir_NonEmpty() {
	var err error

	// Create two levels of directories.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
	AssertEq(nil, err)

	// Attempt to remove the parent.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("not empty")))
}
// Rmdir_Empty removes empty directories leaf-first and verifies each parent
// is empty afterwards.
func (t *MemFSTest) Rmdir_Empty() {
	var err error
	var entries []os.FileInfo

	// Create two levels of directories.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
	AssertEq(nil, err)

	// Remove the leaf.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo/bar"))
	AssertEq(nil, err)

	// There should be nothing left in the parent.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Remove the parent.
	err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)

	// Now the root directory should be empty, too.
	entries, err = ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}
// Rmdir_NonExistent verifies that removing a missing directory fails with
// ENOENT.
func (t *MemFSTest) Rmdir_NonExistent() {
	err := os.Remove(path.Join(t.mfs.Dir(), "blah"))
	AssertNe(nil, err)
	ExpectThat(err, Error(HasSubstr("no such file or directory")))
}
// Rmdir_OpenedForReading removes a directory while a handle to it is open,
// then checks that the unlinked directory can still be statted and reads as
// empty, unaffected by a newer directory of the same name.
func (t *MemFSTest) Rmdir_OpenedForReading() {
	var err error

	// Create a directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0700)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Open the directory for reading.
	f, err := os.Open(path.Join(t.mfs.Dir(), "dir"))
	defer func() {
		if f != nil {
			ExpectEq(nil, f.Close())
		}
	}()

	AssertEq(nil, err)

	// Remove the directory.
	err = os.Remove(path.Join(t.mfs.Dir(), "dir"))
	AssertEq(nil, err)

	// Create a new directory, with the same name even, and add some contents
	// within it.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/foo"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/bar"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/baz"), 0700)
	AssertEq(nil, err)

	// We should still be able to stat the open file handle. It should show up
	// as unlinked. Assert the Stat error before using fi: it was previously
	// ignored, so a failure would nil-panic below instead of being reported.
	fi, err := f.Stat()
	AssertEq(nil, err)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.ModTime().Sub(createTime))

	// TODO(jacobsa): Re-enable this assertion if the following issue is fixed:
	// https://github.com/bazillion/fuse/issues/66
	// ExpectEq(0, fi.Sys().(*syscall.Stat_t).Nlink)

	// Attempt to read from the directory. This should succeed even though it
	// has been unlinked, and we shouldn't see any junk from the new directory.
	entries, err := f.Readdir(0)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}
// CaseSensitive verifies that name lookup is case-sensitive: stat with a
// differently-cased name must fail for both files and directories.
func (t *MemFSTest) CaseSensitive() {
	var err error

	// Create a file.
	err = ioutil.WriteFile(path.Join(t.mfs.Dir(), "file"), []byte{}, 0400)
	AssertEq(nil, err)

	// Create a directory.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0400)
	AssertEq(nil, err)

	// Attempt to stat with the wrong case.
	names := []string{
		"FILE",
		"File",
		"filE",
		"DIR",
		"Dir",
		"dIr",
	}

	for _, name := range names {
		_, err = os.Stat(path.Join(t.mfs.Dir(), name))
		AssertNe(nil, err, "Name: %s", name)
		AssertThat(err, Error(HasSubstr("no such file or directory")))
	}
}
// Placeholders for read/write coverage; each fails loudly so the missing
// coverage stays visible rather than being silently skipped.

func (t *MemFSTest) FileReadsAndWrites() {
	AssertTrue(false, "TODO")
}

func (t *MemFSTest) FileReadsAndWrites_BeyondEOF() {
	AssertTrue(false, "TODO")
}
Implemented MemFSTest.CreateNewFile_InRoot.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memfs_test
import (
"io/ioutil"
"log"
"os"
"os/user"
"path"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/jacobsa/fuse"
"github.com/jacobsa/fuse/samples/memfs"
"github.com/jacobsa/gcsfuse/timeutil"
. "github.com/jacobsa/oglematchers"
. "github.com/jacobsa/ogletest"
"golang.org/x/net/context"
)
func TestMemFS(t *testing.T) { RunTests(t) }
////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////
// currentUid returns the UID of the user running the test, for comparison
// against inode ownership reported by the file system. It panics on failure
// since the tests cannot proceed without it.
func currentUid() uint32 {
	// Use a short local name: the original "user" shadowed the imported
	// os/user package.
	u, err := user.Current()
	if err != nil {
		panic(err)
	}

	// Uid is textual; it must parse as an unsigned 32-bit integer.
	uid, err := strconv.ParseUint(u.Uid, 10, 32)
	if err != nil {
		panic(err)
	}

	return uint32(uid)
}
// currentGid returns the GID of the user running the test, for comparison
// against inode ownership reported by the file system. It panics on failure
// since the tests cannot proceed without it.
func currentGid() uint32 {
	// Use a short local name: the original "user" shadowed the imported
	// os/user package.
	u, err := user.Current()
	if err != nil {
		panic(err)
	}

	// Gid is textual; it must parse as an unsigned 32-bit integer.
	gid, err := strconv.ParseUint(u.Gid, 10, 32)
	if err != nil {
		panic(err)
	}

	return uint32(gid)
}
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(ts.Sec, ts.Nsec)
}
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
type MemFSTest struct {
clock timeutil.SimulatedClock
mfs *fuse.MountedFileSystem
}
var _ SetUpInterface = &MemFSTest{}
var _ TearDownInterface = &MemFSTest{}
func init() { RegisterTestSuite(&MemFSTest{}) }
func (t *MemFSTest) SetUp(ti *TestInfo) {
var err error
// Set up a fixed, non-zero time.
t.clock.SetTime(time.Now())
// Set up a temporary directory for mounting.
mountPoint, err := ioutil.TempDir("", "memfs_test")
if err != nil {
panic("ioutil.TempDir: " + err.Error())
}
// Mount a file system.
fs := memfs.NewMemFS(&t.clock)
if t.mfs, err = fuse.Mount(mountPoint, fs); err != nil {
panic("Mount: " + err.Error())
}
if err = t.mfs.WaitForReady(context.Background()); err != nil {
panic("MountedFileSystem.WaitForReady: " + err.Error())
}
}
func (t *MemFSTest) TearDown() {
// Unmount the file system. Try again on "resource busy" errors.
delay := 10 * time.Millisecond
for {
err := t.mfs.Unmount()
if err == nil {
break
}
if strings.Contains(err.Error(), "resource busy") {
log.Println("Resource busy error while unmounting; trying again")
time.Sleep(delay)
delay = time.Duration(1.3 * float64(delay))
continue
}
panic("MountedFileSystem.Unmount: " + err.Error())
}
if err := t.mfs.Join(context.Background()); err != nil {
panic("MountedFileSystem.Join: " + err.Error())
}
}
////////////////////////////////////////////////////////////////////////
// Test functions
////////////////////////////////////////////////////////////////////////
func (t *MemFSTest) ContentsOfEmptyFileSystem() {
entries, err := ioutil.ReadDir(t.mfs.Dir())
AssertEq(nil, err)
ExpectThat(entries, ElementsAre())
}
// Mkdir_OneLevel creates a directory directly under the mount root and
// verifies its metadata, contents, and presence in the root listing.
func (t *MemFSTest) Mkdir_OneLevel() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t
	var entries []os.FileInfo

	dirName := path.Join(t.mfs.Dir(), "dir")

	// Create a directory within the root.
	createTime := t.clock.Now()
	err = os.Mkdir(dirName, 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the directory. Check err before touching fi: on failure fi is
	// nil and fi.Sys() would panic instead of reporting the real error.
	fi, err = os.Stat(dirName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(os.ModeDir|0754, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectTrue(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(0, stat.Size)
	ExpectEq(0, timespecToTime(stat.Atimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Ctimespec).Sub(createTime))

	// Read the directory.
	entries, err = ioutil.ReadDir(dirName)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Read the root.
	entries, err = ioutil.ReadDir(t.mfs.Dir())
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("dir", fi.Name())
	ExpectEq(os.ModeDir|0754, fi.Mode())
}
// Mkdir_TwoLevels creates a directory inside another directory and verifies
// the child's metadata, contents, and the parent's listing.
func (t *MemFSTest) Mkdir_TwoLevels() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t
	var entries []os.FileInfo

	// Create a directory within the root.
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0700)
	AssertEq(nil, err)

	// Create a child of that directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat the directory. Check err before touching fi: on failure fi is
	// nil and fi.Sys() would panic instead of reporting the real error.
	fi, err = os.Stat(path.Join(t.mfs.Dir(), "parent/dir"))
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(os.ModeDir|0754, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectTrue(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(0, stat.Size)
	ExpectEq(0, timespecToTime(stat.Atimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Ctimespec).Sub(createTime))

	// Read the directory.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent/dir"))
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())

	// Read the parent.
	entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "parent"))
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("dir", fi.Name())
	ExpectEq(os.ModeDir|0754, fi.Mode())
}
func (t *MemFSTest) Mkdir_AlreadyExists() {
var err error
dirName := path.Join(t.mfs.Dir(), "dir")
// Create the directory once.
err = os.Mkdir(dirName, 0754)
AssertEq(nil, err)
// Attempt to create it again.
err = os.Mkdir(dirName, 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("exists")))
}
func (t *MemFSTest) Mkdir_IntermediateIsFile() {
var err error
// Create a file.
fileName := path.Join(t.mfs.Dir(), "foo")
err = ioutil.WriteFile(fileName, []byte{}, 0700)
AssertEq(nil, err)
// Attempt to create a directory within the file.
dirName := path.Join(fileName, "dir")
err = os.Mkdir(dirName, 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("not a directory")))
}
func (t *MemFSTest) Mkdir_IntermediateIsNonExistent() {
var err error
// Attempt to create a sub-directory of a non-existent sub-directory.
dirName := path.Join(t.mfs.Dir(), "foo/dir")
err = os.Mkdir(dirName, 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("no such file or directory")))
}
func (t *MemFSTest) Mkdir_PermissionDenied() {
var err error
// Create a directory within the root without write permissions.
err = os.Mkdir(path.Join(t.mfs.Dir(), "parent"), 0500)
AssertEq(nil, err)
// Attempt to create a child of that directory.
err = os.Mkdir(path.Join(t.mfs.Dir(), "parent/dir"), 0754)
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("permission denied")))
}
// CreateNewFile_InRoot writes a new file directly under the mount root and
// verifies its metadata and that its contents round-trip (including an
// embedded NUL byte).
func (t *MemFSTest) CreateNewFile_InRoot() {
	var err error
	var fi os.FileInfo
	var stat *syscall.Stat_t

	fileName := path.Join(t.mfs.Dir(), "foo")
	const contents = "Hello\x00world"

	// Write a file.
	createTime := t.clock.Now()
	err = ioutil.WriteFile(fileName, []byte(contents), 0400)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Stat it. Check err before touching fi: on failure fi is nil and
	// fi.Sys() would panic instead of reporting the real error.
	fi, err = os.Stat(fileName)
	AssertEq(nil, err)
	stat = fi.Sys().(*syscall.Stat_t)

	ExpectEq("foo", fi.Name())
	ExpectEq(len(contents), fi.Size())
	ExpectEq(0400, fi.Mode())
	ExpectEq(0, fi.ModTime().Sub(createTime))
	ExpectFalse(fi.IsDir())

	ExpectNe(0, stat.Ino)
	ExpectEq(1, stat.Nlink)
	ExpectEq(currentUid(), stat.Uid)
	ExpectEq(currentGid(), stat.Gid)
	ExpectEq(len(contents), stat.Size)
	ExpectEq(0, timespecToTime(stat.Atimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Mtimespec).Sub(createTime))
	ExpectEq(0, timespecToTime(stat.Ctimespec).Sub(createTime))

	// Read it back.
	slice, err := ioutil.ReadFile(fileName)
	AssertEq(nil, err)
	ExpectEq(contents, string(slice))
}
func (t *MemFSTest) CreateNewFile_InSubDir() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) ModifyExistingFile_InRoot() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) ModifyExistingFile_InSubDir() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) UnlinkFile_Exists() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) UnlinkFile_NotAFile() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) UnlinkFile_NonExistent() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) UnlinkFile_StillOpen() {
AssertTrue(false, "TODO")
}
func (t *MemFSTest) Rmdir_NonEmpty() {
var err error
// Create two levels of directories.
err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
AssertEq(nil, err)
// Attempt to remove the parent.
err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
AssertNe(nil, err)
ExpectThat(err, Error(HasSubstr("not empty")))
}
// Rmdir_Empty verifies that empty directories can be removed bottom-up,
// leaving each parent (and finally the mount root) empty.
func (t *MemFSTest) Rmdir_Empty() {
var err error
var entries []os.FileInfo
// Create two levels of directories.
err = os.MkdirAll(path.Join(t.mfs.Dir(), "foo/bar"), 0754)
AssertEq(nil, err)
// Remove the leaf.
err = os.Remove(path.Join(t.mfs.Dir(), "foo/bar"))
AssertEq(nil, err)
// There should be nothing left in the parent.
entries, err = ioutil.ReadDir(path.Join(t.mfs.Dir(), "foo"))
AssertEq(nil, err)
ExpectThat(entries, ElementsAre())
// Remove the parent.
err = os.Remove(path.Join(t.mfs.Dir(), "foo"))
AssertEq(nil, err)
// Now the root directory should be empty, too.
entries, err = ioutil.ReadDir(t.mfs.Dir())
AssertEq(nil, err)
ExpectThat(entries, ElementsAre())
}
// Rmdir_NonExistent verifies that removing a path that was never created
// fails with an ENOENT-style error.
func (t *MemFSTest) Rmdir_NonExistent() {
	removeErr := os.Remove(path.Join(t.mfs.Dir(), "blah"))

	AssertNe(nil, removeErr)
	ExpectThat(removeErr, Error(HasSubstr("no such file or directory")))
}
// Rmdir_OpenedForReading verifies that a directory handle stays usable
// after the directory is unlinked: Stat still works, and Readdir returns
// no entries — in particular none belonging to a new directory created
// later under the same name.
func (t *MemFSTest) Rmdir_OpenedForReading() {
	var err error

	// Create a directory.
	createTime := t.clock.Now()
	err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0700)
	AssertEq(nil, err)

	// Simulate time advancing.
	t.clock.AdvanceTime(time.Second)

	// Open the directory for reading.
	f, err := os.Open(path.Join(t.mfs.Dir(), "dir"))
	defer func() {
		if f != nil {
			ExpectEq(nil, f.Close())
		}
	}()

	AssertEq(nil, err)

	// Remove the directory.
	err = os.Remove(path.Join(t.mfs.Dir(), "dir"))
	AssertEq(nil, err)

	// Create a new directory, with the same name even, and add some contents
	// within it.
	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/foo"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/bar"), 0700)
	AssertEq(nil, err)

	err = os.MkdirAll(path.Join(t.mfs.Dir(), "dir/baz"), 0700)
	AssertEq(nil, err)

	// We should still be able to stat the open file handle. It should show up as
	// unlinked.
	fi, err := f.Stat()
	// Bug fix: previously fi was dereferenced without checking err; a failed
	// Stat would return a nil FileInfo and panic below instead of failing
	// the test cleanly.
	AssertEq(nil, err)

	ExpectEq("dir", fi.Name())
	ExpectEq(0, fi.ModTime().Sub(createTime))

	// TODO(jacobsa): Re-enable this assertion if the following issue is fixed:
	// https://github.com/bazillion/fuse/issues/66
	// ExpectEq(0, fi.Sys().(*syscall.Stat_t).Nlink)

	// Attempt to read from the directory. This should succeed even though it has
	// been unlinked, and we shouldn't see any junk from the new directory.
	entries, err := f.Readdir(0)
	AssertEq(nil, err)
	ExpectThat(entries, ElementsAre())
}
// CaseSensitive verifies that name lookup is case-sensitive: stat'ing an
// existing file or directory with any wrong-case variant must fail with
// an ENOENT-style error.
func (t *MemFSTest) CaseSensitive() {
var err error
// Create a file.
err = ioutil.WriteFile(path.Join(t.mfs.Dir(), "file"), []byte{}, 0400)
AssertEq(nil, err)
// Create a directory.
err = os.Mkdir(path.Join(t.mfs.Dir(), "dir"), 0400)
AssertEq(nil, err)
// Attempt to stat with the wrong case.
names := []string{
"FILE",
"File",
"filE",
"DIR",
"Dir",
"dIr",
}
for _, name := range names {
_, err = os.Stat(path.Join(t.mfs.Dir(), name))
AssertNe(nil, err, "Name: %s", name)
AssertThat(err, Error(HasSubstr("no such file or directory")))
}
}
// FileReadsAndWrites is an intentionally failing placeholder (TODO).
func (t *MemFSTest) FileReadsAndWrites() {
AssertTrue(false, "TODO")
}
// FileReadsAndWrites_BeyondEOF is an intentionally failing placeholder (TODO).
func (t *MemFSTest) FileReadsAndWrites_BeyondEOF() {
AssertTrue(false, "TODO")
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsproxy_test
import (
"bytes"
"fmt"
"io"
"math"
"math/rand"
"testing"
"time"
"golang.org/x/net/context"
"github.com/googlecloudplatform/gcsfuse/gcsproxy"
"github.com/googlecloudplatform/gcsfuse/lease"
"github.com/googlecloudplatform/gcsfuse/timeutil"
"github.com/jacobsa/gcloud/gcs"
"github.com/jacobsa/gcloud/gcs/gcsfake"
"github.com/jacobsa/gcloud/gcs/gcsutil"
. "github.com/jacobsa/oglematchers"
. "github.com/jacobsa/ogletest"
)
// TestIntegration is the single go-test entry point; ogletest's RunTests
// drives every registered suite.
func TestIntegration(t *testing.T) { RunTests(t) }
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
// Create random content of the given length, which must be a multiple of 4.
// randBytes returns n bytes of pseudo-random content, drawn four bytes
// at a time (big-endian) from the package-level rand source. n must be
// a multiple of four; any other value causes a panic.
func randBytes(n int) (b []byte) {
	if n%4 != 0 {
		panic(fmt.Sprintf("Invalid n: %d", n))
	}

	b = make([]byte, 0, n)
	for len(b) < n {
		word := rand.Uint32()
		b = append(
			b,
			byte(word>>24),
			byte(word>>16),
			byte(word>>8),
			byte(word))
	}

	return
}
////////////////////////////////////////////////////////////////////////
// Constants and test fixture
////////////////////////////////////////////////////////////////////////
// chunkSize is deliberately awkward (not a power of two) so that tests
// exercise chunk-boundary handling.
const chunkSize = 1<<18 + 3
// The file leaser is effectively unlimited in file count but capped in
// total bytes, so tests can cross the byte limit cheaply.
const fileLeaserLimitNumFiles = math.MaxInt32
const fileLeaserLimitBytes = 1 << 21
// IntegrationTest exercises gcsproxy.MutableObject against a fake GCS
// bucket, a real file leaser, and a simulated clock.
type IntegrationTest struct {
ctx context.Context
bucket gcs.Bucket
leaser lease.FileLeaser
clock timeutil.SimulatedClock
mo *checkingMutableObject // object under test; set by create, destroyed in TearDown
}
var _ SetUpInterface = &IntegrationTest{}
var _ TearDownInterface = &IntegrationTest{}
func init() { RegisterTestSuite(&IntegrationTest{}) }
// SetUp prepares a fresh fake bucket and file leaser for each test, and
// pins the simulated clock to a known instant.
func (t *IntegrationTest) SetUp(ti *TestInfo) {
t.ctx = ti.Ctx
t.bucket = gcsfake.NewFakeBucket(&t.clock, "some_bucket")
t.leaser = lease.NewFileLeaser(
"",
fileLeaserLimitNumFiles,
fileLeaserLimitBytes)
// Set up a fixed, non-zero time.
t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local))
}
// TearDown releases the mutable object created by t.create, if the test
// made one.
func (t *IntegrationTest) TearDown() {
	if mo := t.mo; mo != nil {
		mo.Destroy()
	}
}
// create wraps the supplied GCS object record in a new MutableObject
// (guarded by invariant checking) and stores it in t.mo for the test.
func (t *IntegrationTest) create(o *gcs.Object) {
// Ensure invariants are checked.
t.mo = &checkingMutableObject{
ctx: t.ctx,
wrapped: gcsproxy.NewMutableObject(
chunkSize,
o,
t.bucket,
t.leaser,
&t.clock),
}
}
// Return the object generation, or -1 if non-existent. Panic on error.
func (t *IntegrationTest) objectGeneration(name string) (gen int64) {
// Stat.
req := &gcs.StatObjectRequest{Name: name}
o, err := t.bucket.StatObject(t.ctx, req)
if _, ok := err.(*gcs.NotFoundError); ok {
gen = -1
return
}
if err != nil {
panic(err)
}
// Check the result.
if o.Generation > math.MaxInt64 {
panic(fmt.Sprintf("Out of range: %v", o.Generation))
}
gen = o.Generation
return
}
////////////////////////////////////////////////////////////////////////
// Tests
////////////////////////////////////////////////////////////////////////
// ReadThenSync verifies that reading alone does not dirty the object:
// Sync afterwards must leave the source generation unchanged.
func (t *IntegrationTest) ReadThenSync() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Read the contents.
buf := make([]byte, 1024)
n, err := t.mo.ReadAt(buf, 0)
AssertThat(err, AnyOf(io.EOF, nil))
ExpectEq(len("taco"), n)
ExpectEq("taco", string(buf[:n]))
// Sync doesn't need to do anything.
err = t.mo.Sync()
ExpectEq(nil, err)
ExpectEq(o.Generation, t.mo.SourceGeneration())
ExpectEq(o.Generation, t.objectGeneration("foo"))
}
// WriteThenSync verifies that a dirtying write followed by Sync produces
// a new GCS generation containing the modified contents.
func (t *IntegrationTest) WriteThenSync() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Overwrite the first byte.
n, err := t.mo.WriteAt([]byte("p"), 0)
AssertEq(nil, err)
ExpectEq(1, n)
// Sync should save out the new generation.
err = t.mo.Sync()
ExpectEq(nil, err)
ExpectNe(o.Generation, t.mo.SourceGeneration())
ExpectEq(t.objectGeneration("foo"), t.mo.SourceGeneration())
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
AssertEq(nil, err)
ExpectEq("paco", contents)
}
// TruncateThenSync verifies that truncation dirties the object and that
// Sync writes the shortened contents out as a new generation.
func (t *IntegrationTest) TruncateThenSync() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Truncate.
err = t.mo.Truncate(2)
AssertEq(nil, err)
// Sync should save out the new generation.
err = t.mo.Sync()
ExpectEq(nil, err)
ExpectNe(o.Generation, t.mo.SourceGeneration())
ExpectEq(t.objectGeneration("foo"), t.mo.SourceGeneration())
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
AssertEq(nil, err)
ExpectEq("ta", contents)
}
// Stat_InitialState verifies that a freshly wrapped, untouched object
// reports the source object's size and mtime and is not clobbered.
func (t *IntegrationTest) Stat_InitialState() {
// Create.
createTime := t.clock.Now()
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
t.create(o)
// Stat.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(o.Size, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(createTime))
ExpectFalse(sr.Clobbered)
}
// Stat_Synced verifies that after dirtying (truncate) and then syncing,
// Stat reports the new size and the time of the truncate.
func (t *IntegrationTest) Stat_Synced() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Dirty.
t.clock.AdvanceTime(time.Second)
truncateTime := t.clock.Now()
err = t.mo.Truncate(2)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Sync.
err = t.mo.Sync()
AssertEq(nil, err)
// Stat.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(2, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectFalse(sr.Clobbered)
}
// Stat_Dirty verifies that while the object is dirty (unsynced truncate)
// Stat reports the local size and modification time.
func (t *IntegrationTest) Stat_Dirty() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Dirty.
t.clock.AdvanceTime(time.Second)
truncateTime := t.clock.Now()
err = t.mo.Truncate(2)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Stat.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(2, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectFalse(sr.Clobbered)
}
// WithinLeaserLimit verifies that contents exactly at the leaser byte
// limit remain readable via the read lease even after the backing GCS
// object has been deleted.
func (t *IntegrationTest) WithinLeaserLimit() {
AssertLt(len("taco"), fileLeaserLimitBytes)
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Extend to be up against the leaser limit, then write out to GCS, which
// should downgrade to a read proxy.
err = t.mo.Truncate(fileLeaserLimitBytes)
AssertEq(nil, err)
err = t.mo.Sync()
AssertEq(nil, err)
// The backing object should be present and contain the correct contents.
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
AssertEq(nil, err)
ExpectEq(fileLeaserLimitBytes, len(contents))
// Delete the backing object.
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// We should still be able to read the contents, because the read lease
// should still be valid.
buf := make([]byte, 4)
n, err := t.mo.ReadAt(buf, 0)
AssertEq(nil, err)
ExpectEq("taco", string(buf[0:n]))
}
// LargerThanLeaserLimit verifies that contents exceeding the leaser byte
// limit cause the read lease to be revoked, so once the backing object is
// deleted the contents are no longer available.
func (t *IntegrationTest) LargerThanLeaserLimit() {
AssertLt(len("taco"), fileLeaserLimitBytes)
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Extend to be past the leaser limit, then write out to GCS, which should
// downgrade to a read proxy.
err = t.mo.Truncate(fileLeaserLimitBytes + 1)
AssertEq(nil, err)
err = t.mo.Sync()
AssertEq(nil, err)
// The backing object should be present and contain the correct contents.
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
AssertEq(nil, err)
ExpectEq(fileLeaserLimitBytes+1, len(contents))
// Delete the backing object.
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// The contents should be lost, because the leaser should have revoked the
// read lease.
_, err = t.mo.ReadAt(make([]byte, len(contents)), 0)
ExpectThat(err, Error(HasSubstr("not found")))
}
// BackingObjectHasBeenDeleted_BeforeReading verifies behavior when the
// backing object disappears before any contents were faulted in:
// synchronously-available data still works and Stat reports clobbered,
// but anything needing the contents fails with "not found".
func (t *IntegrationTest) BackingObjectHasBeenDeleted_BeforeReading() {
// Create an object to obtain a record, then delete it.
createTime := t.clock.Now()
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// Create a mutable object around it.
t.create(o)
// Synchronously-available things should work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(o.Size, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(createTime))
ExpectTrue(sr.Clobbered)
// Sync doesn't need to do anything.
err = t.mo.Sync()
ExpectEq(nil, err)
// Anything that needs to fault in the contents should fail.
_, err = t.mo.ReadAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
err = t.mo.Truncate(10)
ExpectThat(err, Error(HasSubstr("not found")))
_, err = t.mo.WriteAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
}
// BackingObjectHasBeenDeleted_AfterReading verifies behavior when the
// backing object is deleted after contents were faulted in: local reads
// and writes keep working, Stat reports clobbered, and Sync fails with a
// precondition error without creating anything.
func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Fault in the contents.
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
// Delete the backing object.
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// Reading and modications should still work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
_, err = t.mo.WriteAt([]byte("a"), 0)
AssertEq(nil, err)
truncateTime := t.clock.Now()
err = t.mo.Truncate(1)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Stat should see the current state, and see that the object has been
// clobbered.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(1, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectTrue(sr.Clobbered)
// Sync should fail with a precondition error.
err = t.mo.Sync()
ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{}))
// Nothing should have been created.
_, err = gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
ExpectThat(err, HasSameTypeAs(&gcs.NotFoundError{}))
}
// BackingObjectHasBeenOverwritten_BeforeReading verifies behavior when a
// newer generation replaces the backing object before contents were
// faulted in: Stat reports clobbered and content-faulting operations fail
// with "not found" (the wrapped generation is gone).
func (t *IntegrationTest) BackingObjectHasBeenOverwritten_BeforeReading() {
// Create an object, then create the mutable object wrapper around it.
createTime := t.clock.Now()
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
t.create(o)
// Overwrite the GCS object.
_, err = gcsutil.CreateObject(t.ctx, t.bucket, "foo", "burrito")
AssertEq(nil, err)
// Synchronously-available things should work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(o.Size, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(createTime))
ExpectTrue(sr.Clobbered)
// Sync doesn't need to do anything.
err = t.mo.Sync()
ExpectEq(nil, err)
// Anything that needs to fault in the contents should fail.
_, err = t.mo.ReadAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
err = t.mo.Truncate(10)
ExpectThat(err, Error(HasSubstr("not found")))
_, err = t.mo.WriteAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
}
// BackingObjectHasBeenOverwritten_AfterReading verifies behavior when a
// newer generation replaces the backing object after contents were
// faulted in: local modifications keep working, Stat reports clobbered,
// Sync fails with a precondition error, and the newer generation survives.
func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Fault in the contents.
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
// Overwrite the backing object.
_, err = gcsutil.CreateObject(t.ctx, t.bucket, "foo", "burrito")
AssertEq(nil, err)
// Reading and modications should still work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
_, err = t.mo.WriteAt([]byte("a"), 0)
AssertEq(nil, err)
truncateTime := t.clock.Now()
err = t.mo.Truncate(3)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Stat should see the current state, and see that the object has been
// clobbered.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(3, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectTrue(sr.Clobbered)
// Sync should fail with a precondition error.
err = t.mo.Sync()
ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{}))
// The newer version should still be present.
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
AssertEq(nil, err)
ExpectEq("burrito", contents)
}
// MultipleInteractions runs a read/modify/sync/modify script over a range
// of interesting object sizes straddling chunk and leaser-limit
// boundaries, checking after each step that the visible contents match
// the expected bytes.
func (t *IntegrationTest) MultipleInteractions() {
// We will run through the script below for multiple interesting object
// sizes.
sizes := []int{
0,
1,
chunkSize - 1,
chunkSize,
chunkSize + 1,
3*chunkSize - 1,
3 * chunkSize,
3*chunkSize + 1,
fileLeaserLimitBytes - 1,
fileLeaserLimitBytes,
fileLeaserLimitBytes + 1,
((fileLeaserLimitBytes / chunkSize) - 1) * chunkSize,
(fileLeaserLimitBytes / chunkSize) * chunkSize,
((fileLeaserLimitBytes / chunkSize) + 1) * chunkSize,
}
// Generate random contents for the maximum size.
var maxSize int
for _, size := range sizes {
if size > maxSize {
maxSize = size
}
}
randData := randBytes(maxSize)
// Transition the mutable object in and out of the dirty state. Make sure
// everything stays consistent.
for i, size := range sizes {
desc := fmt.Sprintf("test case %d (size %d)", i, size)
name := fmt.Sprintf("obj_%d", i)
buf := make([]byte, size)
// Create the backing object with random initial contents.
expectedContents := make([]byte, size)
copy(expectedContents, randData)
o, err := gcsutil.CreateObject(
t.ctx,
t.bucket,
name,
string(expectedContents))
AssertEq(nil, err)
// Create a mutable object around it.
t.create(o)
// Read the contents of the mutable object.
_, err = t.mo.ReadAt(buf, 0)
AssertThat(err, AnyOf(nil, io.EOF))
if !bytes.Equal(buf, expectedContents) {
AddFailure("Contents mismatch for %s", desc)
AbortTest()
}
// Modify some bytes: first, middle, and last positions.
if size > 0 {
expectedContents[0] = 17
expectedContents[size/2] = 19
expectedContents[size-1] = 23
_, err = t.mo.WriteAt([]byte{17}, 0)
AssertEq(nil, err)
_, err = t.mo.WriteAt([]byte{19}, int64(size/2))
AssertEq(nil, err)
_, err = t.mo.WriteAt([]byte{23}, int64(size-1))
AssertEq(nil, err)
}
// Compare contents again.
_, err = t.mo.ReadAt(buf, 0)
AssertThat(err, AnyOf(nil, io.EOF))
if !bytes.Equal(buf, expectedContents) {
AddFailure("Contents mismatch for %s", desc)
AbortTest()
}
// Sync and check the backing object's contents.
err = t.mo.Sync()
AssertEq(nil, err)
objContents, err := gcsutil.ReadObject(t.ctx, t.bucket, name)
AssertEq(nil, err)
if !bytes.Equal([]byte(objContents), expectedContents) {
AddFailure("Contents mismatch for %s", desc)
AbortTest()
}
// Compare contents again.
_, err = t.mo.ReadAt(buf, 0)
AssertThat(err, AnyOf(nil, io.EOF))
if !bytes.Equal(buf, expectedContents) {
AddFailure("Contents mismatch for %s", desc)
AbortTest()
}
// Dirty again.
if size > 0 {
expectedContents[0] = 29
_, err = t.mo.WriteAt([]byte{29}, 0)
AssertEq(nil, err)
}
// Compare contents again.
_, err = t.mo.ReadAt(buf, 0)
AssertThat(err, AnyOf(nil, io.EOF))
if !bytes.Equal(buf, expectedContents) {
AddFailure("Contents mismatch for %s", desc)
AbortTest()
}
}
}
// Disabled a test.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO(jacobsa): Re-enable and fix this test after refactoring. See #69.
// +build integration

package gcsproxy_test
import (
"bytes"
"fmt"
"io"
"math"
"math/rand"
"testing"
"time"
"golang.org/x/net/context"
"github.com/googlecloudplatform/gcsfuse/gcsproxy"
"github.com/googlecloudplatform/gcsfuse/lease"
"github.com/googlecloudplatform/gcsfuse/timeutil"
"github.com/jacobsa/gcloud/gcs"
"github.com/jacobsa/gcloud/gcs/gcsfake"
"github.com/jacobsa/gcloud/gcs/gcsutil"
. "github.com/jacobsa/oglematchers"
. "github.com/jacobsa/ogletest"
)
func TestIntegration(t *testing.T) { RunTests(t) }
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
// Create random content of the given length, which must be a multiple of 4.
// randBytes returns n bytes of pseudo-random content, drawn four bytes
// at a time (big-endian) from the package-level rand source. n must be
// a multiple of four; any other value causes a panic.
func randBytes(n int) (b []byte) {
	if n%4 != 0 {
		panic(fmt.Sprintf("Invalid n: %d", n))
	}

	b = make([]byte, 0, n)
	for len(b) < n {
		word := rand.Uint32()
		b = append(
			b,
			byte(word>>24),
			byte(word>>16),
			byte(word>>8),
			byte(word))
	}

	return
}
////////////////////////////////////////////////////////////////////////
// Boilerplate
////////////////////////////////////////////////////////////////////////
const chunkSize = 1<<18 + 3
const fileLeaserLimitNumFiles = math.MaxInt32
const fileLeaserLimitBytes = 1 << 21
type IntegrationTest struct {
ctx context.Context
bucket gcs.Bucket
leaser lease.FileLeaser
clock timeutil.SimulatedClock
mo *checkingMutableObject
}
var _ SetUpInterface = &IntegrationTest{}
var _ TearDownInterface = &IntegrationTest{}
func init() { RegisterTestSuite(&IntegrationTest{}) }
func (t *IntegrationTest) SetUp(ti *TestInfo) {
t.ctx = ti.Ctx
t.bucket = gcsfake.NewFakeBucket(&t.clock, "some_bucket")
t.leaser = lease.NewFileLeaser(
"",
fileLeaserLimitNumFiles,
fileLeaserLimitBytes)
// Set up a fixed, non-zero time.
t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local))
}
func (t *IntegrationTest) TearDown() {
if t.mo != nil {
t.mo.Destroy()
}
}
func (t *IntegrationTest) create(o *gcs.Object) {
// Ensure invariants are checked.
t.mo = &checkingMutableObject{
ctx: t.ctx,
wrapped: gcsproxy.NewMutableObject(
chunkSize,
o,
t.bucket,
t.leaser,
&t.clock),
}
}
// Return the object generation, or -1 if non-existent. Panic on error.
func (t *IntegrationTest) objectGeneration(name string) (gen int64) {
// Stat.
req := &gcs.StatObjectRequest{Name: name}
o, err := t.bucket.StatObject(t.ctx, req)
if _, ok := err.(*gcs.NotFoundError); ok {
gen = -1
return
}
if err != nil {
panic(err)
}
// Check the result.
if o.Generation > math.MaxInt64 {
panic(fmt.Sprintf("Out of range: %v", o.Generation))
}
gen = o.Generation
return
}
////////////////////////////////////////////////////////////////////////
// Tests
////////////////////////////////////////////////////////////////////////
func (t *IntegrationTest) ReadThenSync() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Read the contents.
buf := make([]byte, 1024)
n, err := t.mo.ReadAt(buf, 0)
AssertThat(err, AnyOf(io.EOF, nil))
ExpectEq(len("taco"), n)
ExpectEq("taco", string(buf[:n]))
// Sync doesn't need to do anything.
err = t.mo.Sync()
ExpectEq(nil, err)
ExpectEq(o.Generation, t.mo.SourceGeneration())
ExpectEq(o.Generation, t.objectGeneration("foo"))
}
func (t *IntegrationTest) WriteThenSync() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Overwrite the first byte.
n, err := t.mo.WriteAt([]byte("p"), 0)
AssertEq(nil, err)
ExpectEq(1, n)
// Sync should save out the new generation.
err = t.mo.Sync()
ExpectEq(nil, err)
ExpectNe(o.Generation, t.mo.SourceGeneration())
ExpectEq(t.objectGeneration("foo"), t.mo.SourceGeneration())
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
AssertEq(nil, err)
ExpectEq("paco", contents)
}
func (t *IntegrationTest) TruncateThenSync() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Truncate.
err = t.mo.Truncate(2)
AssertEq(nil, err)
// Sync should save out the new generation.
err = t.mo.Sync()
ExpectEq(nil, err)
ExpectNe(o.Generation, t.mo.SourceGeneration())
ExpectEq(t.objectGeneration("foo"), t.mo.SourceGeneration())
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo")
AssertEq(nil, err)
ExpectEq("ta", contents)
}
func (t *IntegrationTest) Stat_InitialState() {
// Create.
createTime := t.clock.Now()
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
t.create(o)
// Stat.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(o.Size, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(createTime))
ExpectFalse(sr.Clobbered)
}
func (t *IntegrationTest) Stat_Synced() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Dirty.
t.clock.AdvanceTime(time.Second)
truncateTime := t.clock.Now()
err = t.mo.Truncate(2)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Sync.
err = t.mo.Sync()
AssertEq(nil, err)
// Stat.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(2, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectFalse(sr.Clobbered)
}
func (t *IntegrationTest) Stat_Dirty() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Dirty.
t.clock.AdvanceTime(time.Second)
truncateTime := t.clock.Now()
err = t.mo.Truncate(2)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Stat.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(2, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectFalse(sr.Clobbered)
}
func (t *IntegrationTest) WithinLeaserLimit() {
AssertLt(len("taco"), fileLeaserLimitBytes)
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Extend to be up against the leaser limit, then write out to GCS, which
// should downgrade to a read proxy.
err = t.mo.Truncate(fileLeaserLimitBytes)
AssertEq(nil, err)
err = t.mo.Sync()
AssertEq(nil, err)
// The backing object should be present and contain the correct contents.
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
AssertEq(nil, err)
ExpectEq(fileLeaserLimitBytes, len(contents))
// Delete the backing object.
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// We should still be able to read the contents, because the read lease
// should still be valid.
buf := make([]byte, 4)
n, err := t.mo.ReadAt(buf, 0)
AssertEq(nil, err)
ExpectEq("taco", string(buf[0:n]))
}
func (t *IntegrationTest) LargerThanLeaserLimit() {
AssertLt(len("taco"), fileLeaserLimitBytes)
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Extend to be past the leaser limit, then write out to GCS, which should
// downgrade to a read proxy.
err = t.mo.Truncate(fileLeaserLimitBytes + 1)
AssertEq(nil, err)
err = t.mo.Sync()
AssertEq(nil, err)
// The backing object should be present and contain the correct contents.
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
AssertEq(nil, err)
ExpectEq(fileLeaserLimitBytes+1, len(contents))
// Delete the backing object.
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// The contents should be lost, because the leaser should have revoked the
// read lease.
_, err = t.mo.ReadAt(make([]byte, len(contents)), 0)
ExpectThat(err, Error(HasSubstr("not found")))
}
func (t *IntegrationTest) BackingObjectHasBeenDeleted_BeforeReading() {
// Create an object to obtain a record, then delete it.
createTime := t.clock.Now()
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// Create a mutable object around it.
t.create(o)
// Synchronously-available things should work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(o.Size, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(createTime))
ExpectTrue(sr.Clobbered)
// Sync doesn't need to do anything.
err = t.mo.Sync()
ExpectEq(nil, err)
// Anything that needs to fault in the contents should fail.
_, err = t.mo.ReadAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
err = t.mo.Truncate(10)
ExpectThat(err, Error(HasSubstr("not found")))
_, err = t.mo.WriteAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
}
func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Fault in the contents.
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
// Delete the backing object.
err = t.bucket.DeleteObject(t.ctx, o.Name)
AssertEq(nil, err)
// Reading and modications should still work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
_, err = t.mo.WriteAt([]byte("a"), 0)
AssertEq(nil, err)
truncateTime := t.clock.Now()
err = t.mo.Truncate(1)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Stat should see the current state, and see that the object has been
// clobbered.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(1, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectTrue(sr.Clobbered)
// Sync should fail with a precondition error.
err = t.mo.Sync()
ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{}))
// Nothing should have been created.
_, err = gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
ExpectThat(err, HasSameTypeAs(&gcs.NotFoundError{}))
}
func (t *IntegrationTest) BackingObjectHasBeenOverwritten_BeforeReading() {
// Create an object, then create the mutable object wrapper around it.
createTime := t.clock.Now()
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
t.create(o)
// Overwrite the GCS object.
_, err = gcsutil.CreateObject(t.ctx, t.bucket, "foo", "burrito")
AssertEq(nil, err)
// Synchronously-available things should work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(o.Size, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(createTime))
ExpectTrue(sr.Clobbered)
// Sync doesn't need to do anything.
err = t.mo.Sync()
ExpectEq(nil, err)
// Anything that needs to fault in the contents should fail.
_, err = t.mo.ReadAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
err = t.mo.Truncate(10)
ExpectThat(err, Error(HasSubstr("not found")))
_, err = t.mo.WriteAt([]byte{}, 0)
ExpectThat(err, Error(HasSubstr("not found")))
}
func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() {
// Create.
o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", "taco")
AssertEq(nil, err)
t.create(o)
// Fault in the contents.
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
// Overwrite the backing object.
_, err = gcsutil.CreateObject(t.ctx, t.bucket, "foo", "burrito")
AssertEq(nil, err)
// Reading and modications should still work.
ExpectEq(o.Generation, t.mo.SourceGeneration())
_, err = t.mo.ReadAt([]byte{}, 0)
AssertEq(nil, err)
_, err = t.mo.WriteAt([]byte("a"), 0)
AssertEq(nil, err)
truncateTime := t.clock.Now()
err = t.mo.Truncate(3)
AssertEq(nil, err)
t.clock.AdvanceTime(time.Second)
// Stat should see the current state, and see that the object has been
// clobbered.
sr, err := t.mo.Stat(true)
AssertEq(nil, err)
ExpectEq(3, sr.Size)
ExpectThat(sr.Mtime, timeutil.TimeEq(truncateTime))
ExpectTrue(sr.Clobbered)
// Sync should fail with a precondition error.
err = t.mo.Sync()
ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{}))
// The newer version should still be present.
contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name)
AssertEq(nil, err)
ExpectEq("burrito", contents)
}
// MultipleInteractions runs a read/modify/sync/re-dirty script against a
// mutable object for a set of interesting object sizes, checking after every
// transition that the visible contents match expectations.
func (t *IntegrationTest) MultipleInteractions() {
	// We will run through the script below for multiple interesting object
	// sizes.
	//
	// The sizes straddle the chunk-size and file-leaser-limit boundaries,
	// where off-by-one behavior is most likely to hide.
	sizes := []int{
		0,
		1,
		chunkSize - 1,
		chunkSize,
		chunkSize + 1,
		3*chunkSize - 1,
		3 * chunkSize,
		3*chunkSize + 1,
		fileLeaserLimitBytes - 1,
		fileLeaserLimitBytes,
		fileLeaserLimitBytes + 1,
		((fileLeaserLimitBytes / chunkSize) - 1) * chunkSize,
		(fileLeaserLimitBytes / chunkSize) * chunkSize,
		((fileLeaserLimitBytes / chunkSize) + 1) * chunkSize,
	}
	// Generate random contents for the maximum size.
	// Each test case slices a prefix of this one buffer instead of
	// regenerating random data per size.
	var maxSize int
	for _, size := range sizes {
		if size > maxSize {
			maxSize = size
		}
	}
	randData := randBytes(maxSize)
	// Transition the mutable object in and out of the dirty state. Make sure
	// everything stays consistent.
	for i, size := range sizes {
		desc := fmt.Sprintf("test case %d (size %d)", i, size)
		name := fmt.Sprintf("obj_%d", i)
		buf := make([]byte, size)
		// Create the backing object with random initial contents.
		expectedContents := make([]byte, size)
		copy(expectedContents, randData)
		o, err := gcsutil.CreateObject(
			t.ctx,
			t.bucket,
			name,
			string(expectedContents))
		AssertEq(nil, err)
		// Create a mutable object around it.
		t.create(o)
		// Read the contents of the mutable object.
		// A read of exactly `size` bytes may legitimately return io.EOF.
		_, err = t.mo.ReadAt(buf, 0)
		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}
		// Modify some bytes: first, middle, and last, to catch chunk-boundary
		// errors at either end.
		if size > 0 {
			expectedContents[0] = 17
			expectedContents[size/2] = 19
			expectedContents[size-1] = 23
			_, err = t.mo.WriteAt([]byte{17}, 0)
			AssertEq(nil, err)
			_, err = t.mo.WriteAt([]byte{19}, int64(size/2))
			AssertEq(nil, err)
			_, err = t.mo.WriteAt([]byte{23}, int64(size-1))
			AssertEq(nil, err)
		}
		// Compare contents again.
		_, err = t.mo.ReadAt(buf, 0)
		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}
		// Sync and check the backing object's contents.
		err = t.mo.Sync()
		AssertEq(nil, err)
		objContents, err := gcsutil.ReadObject(t.ctx, t.bucket, name)
		AssertEq(nil, err)
		if !bytes.Equal([]byte(objContents), expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}
		// Compare contents again (the clean-after-sync read path).
		_, err = t.mo.ReadAt(buf, 0)
		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}
		// Dirty again.
		if size > 0 {
			expectedContents[0] = 29
			_, err = t.mo.WriteAt([]byte{29}, 0)
			AssertEq(nil, err)
		}
		// Compare contents again.
		_, err = t.mo.ReadAt(buf, 0)
		AssertThat(err, AnyOf(nil, io.EOF))
		if !bytes.Equal(buf, expectedContents) {
			AddFailure("Contents mismatch for %s", desc)
			AbortTest()
		}
	}
}
|
// Copyright 2016 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testhelpers provides common support behavior for tests
package testhelpers
import (
"fmt"
"os"
"runtime"
"sync"
"golang.org/x/sys/unix"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// GetInode returns the inode number of the file at the given path.
func GetInode(path string) (uint64, error) {
	f, openErr := os.Open(path)
	if openErr != nil {
		return 0, openErr
	}
	defer f.Close()
	// Delegate to the *os.File-based variant.
	return GetInodeF(f)
}
// GetInodeF returns the inode number of an already-open file, via fstat
// on the file's descriptor. On error the returned inode is the zero value.
func GetInodeF(file *os.File) (uint64, error) {
	stat := &unix.Stat_t{}
	err := unix.Fstat(int(file.Fd()), stat)
	return stat.Ino, err
}
// MakeNetworkNS creates a new network namespace, bind-mounts it at
// /var/run/netns/<containerID>, and returns that mount path.
//
// A note about goroutines and runtime.LockOSThread: Linux network namespaces
// have thread affinity, while the Go scheduler freely moves goroutines
// between OS threads, and Lock/UnlockOSThread calls are not reference
// counted. The namespace work therefore runs in a short-lived dedicated
// goroutine that locks itself to its thread, so the caller's own
// lock/unlock state is never disturbed.
func MakeNetworkNS(containerID string) string {
	namespace := "/var/run/netns/" + containerID

	// NOTE(review): 0600 on a directory omits the search (x) bit; presumably
	// acceptable because these tests run as root — confirm.
	err := os.MkdirAll("/var/run/netns", 0600)
	Expect(err).NotTo(HaveOccurred())

	// create an empty file at the mount point
	mountPointFd, err := os.Create(namespace)
	Expect(err).NotTo(HaveOccurred())
	mountPointFd.Close()

	var wg sync.WaitGroup
	wg.Add(1)

	// do namespace work in a dedicated goroutine, so that we can safely
	// Lock/Unlock OSThread without upsetting the lock/unlock state of
	// the caller of this function
	go (func() {
		defer wg.Done()
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		defer GinkgoRecover()

		// capture current thread's original netns
		pid := unix.Getpid()
		tid := unix.Gettid()
		currentThreadNetNSPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)
		originalNetNS, err := unix.Open(currentThreadNetNSPath, unix.O_RDONLY, 0)
		Expect(err).NotTo(HaveOccurred())
		defer unix.Close(originalNetNS)

		// create a new netns on the current thread
		err = unix.Unshare(unix.CLONE_NEWNET)
		Expect(err).NotTo(HaveOccurred())

		// bind mount the new netns from the current thread onto the mount point
		err = unix.Mount(currentThreadNetNSPath, namespace, "none", unix.MS_BIND, "")
		Expect(err).NotTo(HaveOccurred())

		// reset current thread's netns to the original
		_, _, e1 := unix.Syscall(unix.SYS_SETNS, uintptr(originalNetNS), uintptr(unix.CLONE_NEWNET), 0)
		Expect(e1).To(BeZero())
	})()
	// Block until the namespace goroutine has finished all its work.
	wg.Wait()

	return namespace
}
// RemoveNetworkNS detaches the network-namespace bind mount at networkNS and
// removes the mount-point file, returning the first error encountered.
//
// Bug fixed: previously the unix.Unmount error was assigned to err and then
// immediately overwritten by os.RemoveAll's result, so unmount failures were
// silently discarded.
func RemoveNetworkNS(networkNS string) error {
	// MNT_DETACH performs a lazy unmount, so removal can proceed even if the
	// mount is still in use somewhere.
	unmountErr := unix.Unmount(networkNS, unix.MNT_DETACH)
	// Attempt removal regardless, matching the original best-effort order.
	removeErr := os.RemoveAll(networkNS)
	if unmountErr != nil {
		return unmountErr
	}
	return removeErr
}
Document use of goroutines and LockOSThread in test helpers
// Copyright 2016 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testhelpers provides common support behavior for tests
package testhelpers
import (
"fmt"
"os"
"runtime"
"sync"
"golang.org/x/sys/unix"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// GetInode returns the inode number of the file at the given path.
func GetInode(path string) (uint64, error) {
	file, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer file.Close()
	// Delegate to the *os.File-based variant.
	return GetInodeF(file)
}
// GetInodeF returns the inode number of an already-open file, via fstat
// on the file's descriptor. On error the returned inode is the zero value.
func GetInodeF(file *os.File) (uint64, error) {
	stat := &unix.Stat_t{}
	err := unix.Fstat(int(file.Fd()), stat)
	return stat.Ino, err
}
/*
A note about goroutines, Linux namespaces and runtime.LockOSThread
In Linux, network namespaces have thread affinity.
In the Go language runtime, goroutines do not have affinity for OS threads.
The Go runtime scheduler moves goroutines around amongst OS threads. It
is supposed to be transparent to the Go programmer.
In order to address cases where the programmer needs thread affinity, Go
provides runtime.LockOSThread and runtime.UnlockOSThread()
However, the Go runtime does not reference count the Lock and Unlock calls.
Repeated calls to Lock will succeed, but the first call to Unlock will unlock
everything. Therefore, it is dangerous to hide a Lock/Unlock in a library
function, such as in this package.
The code below, in MakeNetworkNS, avoids this problem by spinning up a new
Go routine specifically so that LockOSThread can be called on it. Thus
goroutine-thread affinity is maintained long enough to perform all the required
namespace operations.
Because the LockOSThread call is performed inside this short-lived goroutine,
there is no effect either way on the caller's goroutine-thread affinity.
*/
// MakeNetworkNS creates a new network namespace, bind-mounts it at
// /var/run/netns/<containerID>, and returns that mount path. See the block
// comment above for why the namespace work runs on its own goroutine.
func MakeNetworkNS(containerID string) string {
	namespace := "/var/run/netns/" + containerID

	// NOTE(review): 0600 on a directory omits the search (x) bit; presumably
	// acceptable because these tests run as root — confirm.
	err := os.MkdirAll("/var/run/netns", 0600)
	Expect(err).NotTo(HaveOccurred())

	// create an empty file at the mount point
	mountPointFd, err := os.Create(namespace)
	Expect(err).NotTo(HaveOccurred())
	mountPointFd.Close()

	var wg sync.WaitGroup
	wg.Add(1)

	// do namespace work in a dedicated goroutine, so that we can safely
	// Lock/Unlock OSThread without upsetting the lock/unlock state of
	// the caller of this function. See block comment above.
	go (func() {
		defer wg.Done()
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		defer GinkgoRecover()

		// capture current thread's original netns
		pid := unix.Getpid()
		tid := unix.Gettid()
		currentThreadNetNSPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)
		originalNetNS, err := unix.Open(currentThreadNetNSPath, unix.O_RDONLY, 0)
		Expect(err).NotTo(HaveOccurred())
		defer unix.Close(originalNetNS)

		// create a new netns on the current thread
		err = unix.Unshare(unix.CLONE_NEWNET)
		Expect(err).NotTo(HaveOccurred())

		// bind mount the new netns from the current thread onto the mount point
		err = unix.Mount(currentThreadNetNSPath, namespace, "none", unix.MS_BIND, "")
		Expect(err).NotTo(HaveOccurred())

		// reset current thread's netns to the original
		_, _, e1 := unix.Syscall(unix.SYS_SETNS, uintptr(originalNetNS), uintptr(unix.CLONE_NEWNET), 0)
		Expect(e1).To(BeZero())
	})()
	// Block until the namespace goroutine has finished all its work.
	wg.Wait()

	return namespace
}
// RemoveNetworkNS detaches the network-namespace bind mount at networkNS and
// removes the mount-point file, returning the first error encountered.
//
// Bug fixed: previously the unix.Unmount error was assigned to err and then
// immediately overwritten by os.RemoveAll's result, so unmount failures were
// silently discarded.
func RemoveNetworkNS(networkNS string) error {
	// MNT_DETACH performs a lazy unmount, so removal can proceed even if the
	// mount is still in use somewhere.
	unmountErr := unix.Unmount(networkNS, unix.MNT_DETACH)
	// Attempt removal regardless, matching the original best-effort order.
	removeErr := os.RemoveAll(networkNS)
	if unmountErr != nil {
		return unmountErr
	}
	return removeErr
}
|
package pool_test
import (
"os"
"os/exec"
"testing"
"time"
"github.com/garyburd/redigo/redis"
"github.com/soundcloud/roshi/pool"
)
// TestRecovery exercises the pool across a Redis restart: requests while
// Redis is down must fail fast rather than deadlock, and the pool must
// recover once Redis is back. Requires redis-server on PATH.
func TestRecovery(t *testing.T) {
	binary := "redis-server"
	absBinary, err := exec.LookPath(binary)
	if err != nil {
		t.Fatalf("%s: %s", binary, err)
	}
	// Build a cluster.
	port := "10001"
	maxConnectionsPerInstance := 2
	redisTimeout := 50 * time.Millisecond
	p := pool.New(
		[]string{"localhost:" + port},
		redisTimeout, redisTimeout, redisTimeout,
		maxConnectionsPerInstance,
		pool.Murmur3,
	)
	// How long to wait after starting redis-server before using it;
	// overridable via TEST_REDIS_WAIT_DURATION, defaulting to 100ms.
	waitDuration, err := time.ParseDuration(os.Getenv("TEST_REDIS_WAIT_DURATION"))
	if err != nil {
		waitDuration = 100 * time.Millisecond
	}
	t.Logf("TEST_REDIS_WAIT_DURATION is %s", waitDuration)
	// Phase 1: start Redis, confirm connectivity. The deferred Kill fires
	// when this anonymous function returns, taking Redis down.
	func() {
		// Start Redis
		cmd := exec.Command(absBinary, "--port", port)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Starting %s: %s", binary, err)
		}
		defer cmd.Process.Kill()
		time.Sleep(waitDuration)
		// Try initial PING
		if err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		}); err != nil {
			t.Fatalf("Initial PING failed: %s", err)
		}
		t.Logf("Initial PING OK")
	}()
	// Phase 2: with Redis down, issue more requests than the pool holds
	// connections for, to detect a pool deadlock under failure.
	terminal := make(chan struct{})
	requests := maxConnectionsPerInstance * 2 // > maxConnectionsPerInstance
	go func() {
		// Redis is down. Make a bunch of requests. All should fail quickly.
		for i := 0; i < requests; i++ {
			if err := p.With("irrelevant", func(conn redis.Conn) error {
				_, err := conn.Do("PING")
				return err
			}); err == nil {
				t.Errorf("Terminal PING succeeded, but we expected failure.")
			} else {
				t.Logf("Terminal PING failed (%s), but that was expected", err)
			}
		}
		close(terminal)
	}()
	// Allow twice the worst-case total timeout before declaring a deadlock.
	select {
	case <-terminal:
		t.Logf("Terminal PINGs completed in time.")
	case <-time.After(2 * time.Duration(requests) * redisTimeout):
		t.Fatalf("Terminal PINGs timed out. Deadlock in connection pool?")
	}
	// Phase 3: restart Redis and confirm the pool recovers.
	func() {
		// Restart Redis
		cmd := exec.Command(absBinary, "--port", port)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Starting %s: %s", binary, err)
		}
		defer cmd.Process.Kill()
		time.Sleep(waitDuration)
		// Try second PING x1; its result is informational only, since the
		// first attempt after a restart may hit a stale connection.
		err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		})
		t.Logf("Second PING x1 gave error %v (just FYI)", err)
		time.Sleep(1*time.Second) // attempt to scoot by a problem with Travis
		// Try second PING x2: this one must succeed.
		if err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		}); err != nil {
			t.Errorf("Second PING x2 failed: %s", err)
		} else {
			t.Logf("Second PING x2 OK")
		}
	}()
}
pool: attempt to strengthen integration test
package pool_test
import (
"os"
"os/exec"
"testing"
"time"
"github.com/garyburd/redigo/redis"
"github.com/soundcloud/roshi/pool"
)
// TestRecovery exercises the pool across a Redis restart: requests while
// Redis is down must fail fast rather than deadlock, and the pool must
// recover once Redis is back. Requires redis-server on PATH.
func TestRecovery(t *testing.T) {
	binary := "redis-server"
	absBinary, err := exec.LookPath(binary)
	if err != nil {
		t.Fatalf("%s: %s", binary, err)
	}
	// Build a cluster.
	var (
		port                      = "10001"
		maxConnectionsPerInstance = 2
		redisTimeout              = 1000 * time.Millisecond
	)
	p := pool.New(
		[]string{"localhost:" + port},
		redisTimeout, redisTimeout, redisTimeout,
		maxConnectionsPerInstance,
		pool.Murmur3,
	)
	// How long to wait after starting redis-server before using it;
	// overridable via TEST_REDIS_WAIT_DURATION, defaulting to 100ms.
	waitDuration, err := time.ParseDuration(os.Getenv("TEST_REDIS_WAIT_DURATION"))
	if err != nil {
		waitDuration = 100 * time.Millisecond
	}
	t.Logf("TEST_REDIS_WAIT_DURATION is %s", waitDuration)
	// Phase 1: start Redis, confirm connectivity. The deferred Kill fires
	// when this anonymous function returns, taking Redis down.
	func() {
		// Start Redis
		cmd := exec.Command(absBinary, "--port", port)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Starting %s: %s", binary, err)
		}
		defer cmd.Process.Kill()
		time.Sleep(waitDuration)
		// Try initial PING
		if err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		}); err != nil {
			t.Fatalf("Initial PING failed: %s", err)
		}
		t.Logf("Initial PING OK")
	}()
	// Phase 2: with Redis down, issue more requests than the pool holds
	// connections for, to detect a pool deadlock under failure.
	terminal := make(chan struct{})
	requests := maxConnectionsPerInstance * 2 // > maxConnectionsPerInstance
	go func() {
		// Redis is down. Make a bunch of requests. All should fail quickly.
		for i := 0; i < requests; i++ {
			if err := p.With("irrelevant", func(conn redis.Conn) error {
				_, err := conn.Do("PING")
				return err
			}); err == nil {
				t.Errorf("Terminal PING succeeded, but we expected failure.")
			} else {
				t.Logf("Terminal PING failed (%s), but that was expected", err)
			}
		}
		close(terminal)
	}()
	// Allow twice the worst-case total timeout before declaring a deadlock.
	select {
	case <-terminal:
		t.Logf("Terminal PINGs completed in time.")
	case <-time.After(2 * time.Duration(requests) * redisTimeout):
		t.Fatalf("Terminal PINGs timed out. Deadlock in connection pool?")
	}
	// Phase 3: restart Redis and confirm the pool recovers.
	func() {
		// Restart Redis
		cmd := exec.Command(absBinary, "--port", port)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Starting %s: %s", binary, err)
		}
		defer cmd.Process.Kill()
		time.Sleep(waitDuration)
		// Try second PING x1; its result is informational only, since the
		// first attempt after a restart may hit a stale connection.
		err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		})
		t.Logf("Second PING x1 gave error %v (just FYI)", err)
		time.Sleep(1 * time.Second) // attempt to scoot by a problem with Travis
		// Try second PING x2: this one must succeed.
		if err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		}); err != nil {
			t.Errorf("Second PING x2 failed: %s", err)
		} else {
			t.Logf("Second PING x2 OK")
		}
	}()
}
|
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package java
import (
"bytes"
"fmt"
"log"
"strings"
"v.io/x/ref/lib/vdl/compile"
"v.io/x/ref/lib/vdl/vdlutil"
)
// arrayTmpl is the text/template used to generate a Java class for a named
// VDL fixed-length array type. The emitted class extends VdlArray, exposes
// the length as a public static LENGTH field, and (for primitive element
// types) adds a convenience constructor that boxes a primitive array.
const arrayTmpl = header + `
// Source: {{.SourceFile}}
package {{.Package}};
/**
 * type {{.Name}} {{.VdlTypeString}} {{.Doc}}
 **/
@io.v.v23.vdl.GeneratedFromVdl(name = "{{.VdlTypeName}}")
{{ .AccessModifier }} final class {{.Name}} extends io.v.v23.vdl.VdlArray<{{.ElemType}}> {
    private static final long serialVersionUID = 1L;
    public static final int LENGTH = {{.Length}};
    public static final io.v.v23.vdl.VdlType VDL_TYPE =
            io.v.v23.vdl.Types.getVdlTypeFromReflect({{.Name}}.class);
    public {{.Name}}({{.ElemType}}[] arr) {
        super(VDL_TYPE, arr);
    }
    public {{.Name}}() {
        this({{.ZeroValue}});
    }
{{ if .ElemIsPrimitive }}
    public {{.Name}}({{ .ElemPrimitiveType }}[] arr) {
        super(VDL_TYPE, convert(arr));
    }
    private static {{ .ElemType }}[] convert({{ .ElemPrimitiveType }}[] arr) {
        final {{ .ElemType }}[] ret = new {{ .ElemType }}[arr.length];
        for (int i = 0; i < arr.length; ++i) {
            ret[i] = arr[i];
        }
        return ret;
    }
{{ end }}
}
`
// genJavaArrayFile generates the Java class file for the provided named array type.
// It fills arrayTmpl with metadata derived from tdef and returns the rendered
// file name and contents.
func genJavaArrayFile(tdef *compile.TypeDef, env *compile.Env) JavaFileInfo {
	javaTypeName := vdlutil.FirstRuneToUpper(tdef.Name)
	elemType := javaType(tdef.Type.Elem(), true, env)
	// Build the array initializer: Len() copies of the element zero value,
	// joined by ", " (the trailing separator is trimmed off).
	elems := strings.TrimSuffix(strings.Repeat(javaZeroValue(tdef.Type.Elem(), env)+", ", tdef.Type.Len()), ", ")
	zeroValue := fmt.Sprintf("new %s[] {%s}", elemType, elems)
	// Template parameters; see arrayTmpl for how each field is used.
	data := struct {
		FileDoc           string
		AccessModifier    string
		Doc               string
		ElemType          string
		ElemIsPrimitive   bool
		ElemPrimitiveType string
		Length            int
		Name              string
		Package           string
		SourceFile        string
		VdlTypeName       string
		VdlTypeString     string
		ZeroValue         string
	}{
		FileDoc:           tdef.File.Package.FileDoc,
		AccessModifier:    accessModifierForName(tdef.Name),
		Doc:               javaDocInComment(tdef.Doc),
		ElemType:          elemType,
		ElemIsPrimitive:   !isClass(tdef.Type.Elem(), env),
		ElemPrimitiveType: javaType(tdef.Type.Elem(), false, env),
		Length:            tdef.Type.Len(),
		Name:              javaTypeName,
		Package:           javaPath(javaGenPkgPath(tdef.File.Package.GenPath)),
		SourceFile:        tdef.File.BaseName,
		VdlTypeName:       tdef.Type.Name(),
		VdlTypeString:     tdef.Type.String(),
		ZeroValue:         zeroValue,
	}
	var buf bytes.Buffer
	err := parseTmpl("array", arrayTmpl).Execute(&buf, data)
	if err != nil {
		// A template failure is a bug in the generator itself, so abort.
		log.Fatalf("vdl: couldn't execute array template: %v", err)
	}
	return JavaFileInfo{
		Name: javaTypeName + ".java",
		Data: buf.Bytes(),
	}
}
vdl/java: fix array length visibility of non-public classes
Change length of a fixed-length array from public static field
to class annotation, as through Java reflection you can see
all type information, but can't get the actual value.
Change-Id: Ie662a450165a2f92f60e7f8c7b23a4fe89312966
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package java
import (
"bytes"
"fmt"
"log"
"strings"
"v.io/x/ref/lib/vdl/compile"
"v.io/x/ref/lib/vdl/vdlutil"
)
// arrayTmpl is the text/template used to generate a Java class for a named
// VDL fixed-length array type. The array length is exposed through the
// ArrayLength class annotation (readable via reflection regardless of the
// class's access modifier), and the class extends VdlArray.
const arrayTmpl = header + `
// Source: {{.SourceFile}}
package {{.Package}};
/**
 * type {{.Name}} {{.VdlTypeString}} {{.Doc}}
 **/
@io.v.v23.vdl.GeneratedFromVdl(name = "{{.VdlTypeName}}")
@io.v.v23.vdl.ArrayLength({{.Length}})
{{ .AccessModifier }} final class {{.Name}} extends io.v.v23.vdl.VdlArray<{{.ElemType}}> {
    private static final long serialVersionUID = 1L;
    public static final io.v.v23.vdl.VdlType VDL_TYPE =
            io.v.v23.vdl.Types.getVdlTypeFromReflect({{.Name}}.class);
    public {{.Name}}({{.ElemType}}[] arr) {
        super(VDL_TYPE, arr);
    }
    public {{.Name}}() {
        this({{.ZeroValue}});
    }
{{ if .ElemIsPrimitive }}
    public {{.Name}}({{ .ElemPrimitiveType }}[] arr) {
        super(VDL_TYPE, convert(arr));
    }
    private static {{ .ElemType }}[] convert({{ .ElemPrimitiveType }}[] arr) {
        final {{ .ElemType }}[] ret = new {{ .ElemType }}[arr.length];
        for (int i = 0; i < arr.length; ++i) {
            ret[i] = arr[i];
        }
        return ret;
    }
{{ end }}
}
`
// genJavaArrayFile generates the Java class file for the provided named array type.
// It fills arrayTmpl with metadata derived from tdef and returns the rendered
// file name and contents.
func genJavaArrayFile(tdef *compile.TypeDef, env *compile.Env) JavaFileInfo {
	className := vdlutil.FirstRuneToUpper(tdef.Name)
	elem := tdef.Type.Elem()
	elemType := javaType(elem, true, env)

	// Build the array initializer: Len() copies of the element zero value,
	// joined by ", ".
	repeated := strings.Repeat(javaZeroValue(elem, env)+", ", tdef.Type.Len())
	zeroValue := fmt.Sprintf("new %s[] {%s}", elemType, strings.TrimSuffix(repeated, ", "))

	// Template parameters; see arrayTmpl for how each field is used.
	data := struct {
		FileDoc           string
		AccessModifier    string
		Doc               string
		ElemType          string
		ElemIsPrimitive   bool
		ElemPrimitiveType string
		Length            int
		Name              string
		Package           string
		SourceFile        string
		VdlTypeName       string
		VdlTypeString     string
		ZeroValue         string
	}{
		FileDoc:           tdef.File.Package.FileDoc,
		AccessModifier:    accessModifierForName(tdef.Name),
		Doc:               javaDocInComment(tdef.Doc),
		ElemType:          elemType,
		ElemIsPrimitive:   !isClass(elem, env),
		ElemPrimitiveType: javaType(elem, false, env),
		Length:            tdef.Type.Len(),
		Name:              className,
		Package:           javaPath(javaGenPkgPath(tdef.File.Package.GenPath)),
		SourceFile:        tdef.File.BaseName,
		VdlTypeName:       tdef.Type.Name(),
		VdlTypeString:     tdef.Type.String(),
		ZeroValue:         zeroValue,
	}

	var rendered bytes.Buffer
	if err := parseTmpl("array", arrayTmpl).Execute(&rendered, data); err != nil {
		// A template failure is a bug in the generator itself, so abort.
		log.Fatalf("vdl: couldn't execute array template: %v", err)
	}
	return JavaFileInfo{
		Name: className + ".java",
		Data: rendered.Bytes(),
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Linux system calls.
// This file is compiled as ordinary Go code,
// but it is also input to mksyscall,
// which parses the //sys lines and generates system call stubs.
// Note that sometimes we use a lowercase //sys name and
// wrap it in our own nicer implementation.
package unix
import (
"encoding/binary"
"net"
"runtime"
"syscall"
"unsafe"
)
/*
* Wrapped
*/
func Access(path string, mode uint32) (err error) {
return Faccessat(AT_FDCWD, path, mode, 0)
}
func Chmod(path string, mode uint32) (err error) {
return Fchmodat(AT_FDCWD, path, mode, 0)
}
func Chown(path string, uid int, gid int) (err error) {
return Fchownat(AT_FDCWD, path, uid, gid, 0)
}
func Creat(path string, mode uint32) (fd int, err error) {
return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
}
//sys fchmodat(dirfd int, path string, mode uint32) (err error)
// Fchmodat changes the mode of the file at path, interpreted relative to
// dirfd, to the given mode.
//
// Linux fchmodat doesn't support the flags parameter. Mimic glibc's behavior
// and check the flags. Otherwise the mode would be applied to the symlink
// destination which is not what the user expects.
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
	if flags&^AT_SYMLINK_NOFOLLOW != 0 {
		// Any flag other than AT_SYMLINK_NOFOLLOW is invalid.
		return EINVAL
	} else if flags&AT_SYMLINK_NOFOLLOW != 0 {
		// The raw syscall cannot honor "don't follow", so refuse rather than
		// silently chmod the symlink target.
		return EOPNOTSUPP
	}
	return fchmodat(dirfd, path, mode)
}
//sys ioctl(fd int, req uint, arg uintptr) (err error)
// ioctl itself should not be exposed directly, but additional get/set
// functions for specific types are permissible.
// IoctlSetPointerInt performs an ioctl operation which sets an
// integer value on fd, using the specified request number. The ioctl
// argument is called with a pointer to the integer value, rather than
// passing the integer value directly.
func IoctlSetPointerInt(fd int, req uint, value int) error {
v := int32(value)
return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
}
// IoctlSetInt performs an ioctl operation which sets an integer value
// on fd, using the specified request number.
func IoctlSetInt(fd int, req uint, value int) error {
return ioctl(fd, req, uintptr(value))
}
func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
func ioctlSetTermios(fd int, req uint, value *Termios) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
func IoctlSetRTCTime(fd int, value *RTCTime) error {
err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
}
// IoctlGetInt performs an ioctl operation which gets an integer value
// from fd, using the specified request number.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return &value, err
}
func IoctlGetTermios(fd int, req uint) (*Termios, error) {
var value Termios
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return &value, err
}
func IoctlGetRTCTime(fd int) (*RTCTime, error) {
var value RTCTime
err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value)))
return &value, err
}
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
func Link(oldpath string, newpath string) (err error) {
return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
}
func Mkdir(path string, mode uint32) (err error) {
return Mkdirat(AT_FDCWD, path, mode)
}
func Mknod(path string, mode uint32, dev int) (err error) {
return Mknodat(AT_FDCWD, path, mode, dev)
}
func Open(path string, mode int, perm uint32) (fd int, err error) {
return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm)
}
//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
return openat(dirfd, path, flags|O_LARGEFILE, mode)
}
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
if len(fds) == 0 {
return ppoll(nil, 0, timeout, sigmask)
}
return ppoll(&fds[0], len(fds), timeout, sigmask)
}
//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)
func Readlink(path string, buf []byte) (n int, err error) {
return Readlinkat(AT_FDCWD, path, buf)
}
func Rename(oldpath string, newpath string) (err error) {
return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath)
}
func Rmdir(path string) error {
return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
}
//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error)
func Symlink(oldpath string, newpath string) (err error) {
return Symlinkat(oldpath, AT_FDCWD, newpath)
}
func Unlink(path string) error {
return Unlinkat(AT_FDCWD, path, 0)
}
//sys Unlinkat(dirfd int, path string, flags int) (err error)
// Utimes changes the access and modification times of the file at path.
// tv must be nil (set both times to the current time, per utimensat(2))
// or hold exactly two Timevals: access time then modification time.
// utimensat is preferred; on ENOSYS (pre-2.6.22 kernels) it falls back
// to the older utimes syscall.
func Utimes(path string, tv []Timeval) error {
	if tv == nil {
		err := utimensat(AT_FDCWD, path, nil, 0)
		if err != ENOSYS {
			return err
		}
		return utimes(path, nil)
	}
	if len(tv) != 2 {
		return EINVAL
	}
	// Convert to Timespec for utimensat.
	var ts [2]Timespec
	ts[0] = NsecToTimespec(TimevalToNsec(tv[0]))
	ts[1] = NsecToTimespec(TimevalToNsec(tv[1]))
	err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
	if err != ENOSYS {
		return err
	}
	// Fall back to utimes with the original Timevals.
	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
// UtimesNano changes the access and modification times of the file at path
// with nanosecond precision. ts must be nil (set both times to the current
// time, per utimensat(2)) or hold exactly two Timespecs: access time then
// modification time.
func UtimesNano(path string, ts []Timespec) error {
	if ts == nil {
		err := utimensat(AT_FDCWD, path, nil, 0)
		if err != ENOSYS {
			return err
		}
		return utimes(path, nil)
	}
	if len(ts) != 2 {
		return EINVAL
	}
	err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
	if err != ENOSYS {
		return err
	}
	// If the utimensat syscall isn't available (utimensat was added to Linux
	// in 2.6.22, Released, 8 July 2007) then fall back to utimes,
	// losing sub-microsecond precision in the conversion.
	var tv [2]Timeval
	for i := 0; i < 2; i++ {
		tv[i] = NsecToTimeval(TimespecToNsec(ts[i]))
	}
	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}
func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
if ts == nil {
return utimensat(dirfd, path, nil, flags)
}
if len(ts) != 2 {
return EINVAL
}
return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
}
func Futimesat(dirfd int, path string, tv []Timeval) error {
if tv == nil {
return futimesat(dirfd, path, nil)
}
if len(tv) != 2 {
return EINVAL
}
return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}
func Futimes(fd int, tv []Timeval) (err error) {
// Believe it or not, this is the best we can do on Linux
// (and is what glibc does).
return Utimes("/proc/self/fd/"+itoa(fd), tv)
}
const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error)
// Getwd returns the current working directory via the getcwd system call,
// reading into a PathMax-sized buffer.
func Getwd() (wd string, err error) {
	var buf [PathMax]byte
	n, err := Getcwd(buf[0:])
	if err != nil {
		return "", err
	}
	// Getcwd returns the number of bytes written to buf, including the NUL.
	if n < 1 || n > len(buf) || buf[n-1] != 0 {
		return "", EINVAL
	}
	// Strip the trailing NUL before converting to a string.
	return string(buf[0 : n-1]), nil
}
// Getgroups returns the supplementary group IDs of the calling process.
// It uses the standard two-call pattern: first query the count with a nil
// buffer, then fetch into a buffer of that size.
func Getgroups() (gids []int, err error) {
	n, err := getgroups(0, nil)
	if err != nil {
		return nil, err
	}
	if n == 0 {
		// No supplementary groups; return a nil slice.
		return nil, nil
	}
	// Sanity check group count. Max is 1<<16 on Linux.
	if n < 0 || n > 1<<20 {
		return nil, EINVAL
	}
	a := make([]_Gid_t, n)
	n, err = getgroups(n, &a[0])
	if err != nil {
		return nil, err
	}
	// Convert from the kernel's _Gid_t representation to int.
	gids = make([]int, n)
	for i, v := range a[0:n] {
		gids[i] = int(v)
	}
	return
}
// Setgroups sets the supplementary group IDs of the calling process.
// An empty (or nil) slice clears all supplementary groups.
func Setgroups(gids []int) (err error) {
	if len(gids) == 0 {
		return setgroups(0, nil)
	}
	// Convert to the kernel's _Gid_t representation before the call.
	a := make([]_Gid_t, len(gids))
	for i, v := range gids {
		a[i] = _Gid_t(v)
	}
	return setgroups(len(a), &a[0])
}
type WaitStatus uint32
// Wait status is 7 bits at bottom, either 0 (exited),
// 0x7F (stopped), or a signal number that caused an exit.
// The 0x80 bit is whether there was a core dump.
// An extra number (exit code, signal causing a stop)
// is in the high bits. At least that's the idea.
// There are various irregularities. For example, the
// "continued" status is 0xFFFF, distinguishing itself
// from stopped via the core dump bit.
const (
mask = 0x7F
core = 0x80
exited = 0x00
stopped = 0x7F
shift = 8
)
func (w WaitStatus) Exited() bool { return w&mask == exited }
func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }
func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }
func (w WaitStatus) Continued() bool { return w == 0xFFFF }
func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
func (w WaitStatus) ExitStatus() int {
if !w.Exited() {
return -1
}
return int(w>>shift) & 0xFF
}
func (w WaitStatus) Signal() syscall.Signal {
if !w.Signaled() {
return -1
}
return syscall.Signal(w & mask)
}
func (w WaitStatus) StopSignal() syscall.Signal {
if !w.Stopped() {
return -1
}
return syscall.Signal(w>>shift) & 0xFF
}
func (w WaitStatus) TrapCause() int {
if w.StopSignal() != SIGTRAP {
return -1
}
return int(w>>shift) >> 8
}
//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
var status _C_int
wpid, err = wait4(pid, &status, options, rusage)
if wstatus != nil {
*wstatus = WaitStatus(status)
}
return
}
func Mkfifo(path string, mode uint32) error {
return Mknod(path, mode|S_IFIFO, 0)
}
func Mkfifoat(dirfd int, path string, mode uint32) error {
return Mknodat(dirfd, path, mode|S_IFIFO, 0)
}
func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
if sa.Port < 0 || sa.Port > 0xFFFF {
return nil, 0, EINVAL
}
sa.raw.Family = AF_INET
p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
p[0] = byte(sa.Port >> 8)
p[1] = byte(sa.Port)
for i := 0; i < len(sa.Addr); i++ {
sa.raw.Addr[i] = sa.Addr[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
}
func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
if sa.Port < 0 || sa.Port > 0xFFFF {
return nil, 0, EINVAL
}
sa.raw.Family = AF_INET6
p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
p[0] = byte(sa.Port >> 8)
p[1] = byte(sa.Port)
sa.raw.Scope_id = sa.ZoneId
for i := 0; i < len(sa.Addr); i++ {
sa.raw.Addr[i] = sa.Addr[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
}
// sockaddr converts sa to the raw kernel representation for AF_UNIX
// sockets, returning a pointer to the raw struct and the length to pass
// to the kernel. A name beginning with '@' denotes a Linux
// abstract-namespace address: the '@' is rewritten to a leading NUL and
// the trailing NUL is excluded from the reported length.
func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
	name := sa.Name
	n := len(name)
	if n >= len(sa.raw.Path) {
		// Name would not fit in the fixed-size path buffer.
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_UNIX
	for i := 0; i < n; i++ {
		sa.raw.Path[i] = int8(name[i])
	}
	// length is family (uint16), name, NUL.
	sl := _Socklen(2)
	if n > 0 {
		sl += _Socklen(n) + 1
	}
	if sa.raw.Path[0] == '@' {
		sa.raw.Path[0] = 0
		// Don't count trailing NUL for abstract address.
		sl--
	}
	return unsafe.Pointer(&sa.raw), sl, nil
}
// SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets.
type SockaddrLinklayer struct {
Protocol uint16
Ifindex int
Hatype uint16
Pkttype uint8
Halen uint8
Addr [8]byte
raw RawSockaddrLinklayer
}
func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
return nil, 0, EINVAL
}
sa.raw.Family = AF_PACKET
sa.raw.Protocol = sa.Protocol
sa.raw.Ifindex = int32(sa.Ifindex)
sa.raw.Hatype = sa.Hatype
sa.raw.Pkttype = sa.Pkttype
sa.raw.Halen = sa.Halen
for i := 0; i < len(sa.Addr); i++ {
sa.raw.Addr[i] = sa.Addr[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
}
// SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets.
type SockaddrNetlink struct {
Family uint16
Pad uint16
Pid uint32
Groups uint32
raw RawSockaddrNetlink
}
func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_NETLINK
sa.raw.Pad = sa.Pad
sa.raw.Pid = sa.Pid
sa.raw.Groups = sa.Groups
return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
}
// SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the HCI protocol.
type SockaddrHCI struct {
Dev uint16
Channel uint16
raw RawSockaddrHCI
}
func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_BLUETOOTH
sa.raw.Dev = sa.Dev
sa.raw.Channel = sa.Channel
return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
}
// SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the L2CAP protocol.
type SockaddrL2 struct {
PSM uint16
CID uint16
Addr [6]uint8
AddrType uint8
raw RawSockaddrL2
}
// sockaddr converts sa to the raw kernel representation for
// AF_BLUETOOTH/L2CAP sockets. PSM and CID are written little-endian, and
// the six address bytes are copied into Bdaddr in reverse order.
func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	// Store the PSM little-endian.
	psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
	psm[0] = byte(sa.PSM)
	psm[1] = byte(sa.PSM >> 8)
	// Copy the address with its byte order reversed.
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
	}
	// Store the CID little-endian.
	cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
	cid[0] = byte(sa.CID)
	cid[1] = byte(sa.CID >> 8)
	sa.raw.Bdaddr_type = sa.AddrType
	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
}
// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the RFCOMM protocol.
//
// Server example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
// })
// _ = Listen(fd, 1)
// nfd, sa, _ := Accept(fd)
// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
// Read(nfd, buf)
//
// Client example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = Connect(fd, &SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
// })
// Write(fd, []byte(`hello`))
type SockaddrRFCOMM struct {
// Addr represents a bluetooth address, byte ordering is little-endian.
Addr [6]uint8
// Channel is a designated bluetooth channel, only 1-30 are available for use.
// Since Linux 2.6.7 and further zero value is the first available channel.
Channel uint8
raw RawSockaddrRFCOMM
}
// sockaddr copies sa into its raw kernel representation and returns a
// pointer/length pair suitable for bind(2)/connect(2).
func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	sa.raw.Channel = sa.Channel
	sa.raw.Bdaddr = sa.Addr
	return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil
}
// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
// The RxID and TxID fields are used for transport protocol addressing in
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning.
//
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
type SockaddrCAN struct {
	Ifindex int    // network interface index; must fit in an int32
	RxID    uint32 // transport-protocol receive ID (unused for CAN_RAW/CAN_BCM)
	TxID    uint32 // transport-protocol transmit ID (unused for CAN_RAW/CAN_BCM)
	raw     RawSockaddrCAN
}
// sockaddr copies sa into its raw kernel representation. RxID and TxID
// are stored into the Addr payload in host byte order (via a direct
// byte-level view of the uint32 fields).
func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
	// Ifindex is narrowed to int32 below; reject values that don't fit.
	if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_CAN
	sa.raw.Ifindex = int32(sa.Ifindex)
	rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
	copy(sa.raw.Addr[:4], rx[:])
	tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
	copy(sa.raw.Addr[4:8], tx[:])
	return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
}
// SockaddrALG implements the Sockaddr interface for AF_ALG type sockets.
// SockaddrALG enables userspace access to the Linux kernel's cryptography
// subsystem. The Type and Name fields specify which type of hash or cipher
// should be used with a given socket.
//
// To create a file descriptor that provides access to a hash or cipher, both
// Bind and Accept must be used. Once the setup process is complete, input
// data can be written to the socket, processed by the kernel, and then read
// back as hash output or ciphertext.
//
// Here is an example of using an AF_ALG socket with SHA1 hashing.
// The initial socket setup process is as follows:
//
// // Open a socket to perform SHA1 hashing.
// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
// unix.Bind(fd, addr)
// // Note: unix.Accept does not work at this time; must invoke accept()
// // manually using unix.Syscall.
// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
//
// Once a file descriptor has been returned from Accept, it may be used to
// perform SHA1 hashing. The descriptor is not safe for concurrent use, but
// may be re-used repeatedly with subsequent Write and Read operations.
//
// When hashing a small byte slice or string, a single Write and Read may
// be used:
//
// // Assume hashfd is already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash an input string and read the results. Each Write discards
// // previous hash state. Read always reads the current state.
// b := make([]byte, 20)
// for i := 0; i < 2; i++ {
// io.WriteString(hash, "Hello, world.")
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// }
// // Output:
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
//
// For hashing larger byte slices, or byte streams such as those read from
// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update
// the hash digest instead of creating a new one for a given chunk and finalizing it.
//
// // Assume hashfd and addr are already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash the contents of a file.
// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
// b := make([]byte, 4096)
// for {
// n, err := f.Read(b)
// if err == io.EOF {
// break
// }
// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
// }
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
//
// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html.
type SockaddrALG struct {
	Type    string // algorithm type, e.g. "hash" or "skcipher" (max 13 bytes)
	Name    string // algorithm name, e.g. "sha1" (max 63 bytes)
	Feature uint32 // feature bits passed through to the kernel
	Mask    uint32 // feature mask passed through to the kernel
	raw     RawSockaddrALG
}
// sockaddr copies sa into its raw kernel representation, rejecting Type
// and Name values that would not fit in the fixed-size raw fields.
func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) {
	// Leave room for NUL byte terminator.
	if len(sa.Type) > 13 {
		return nil, 0, EINVAL
	}
	if len(sa.Name) > 63 {
		return nil, 0, EINVAL
	}

	sa.raw.Family = AF_ALG
	sa.raw.Feat = sa.Feature
	sa.raw.Mask = sa.Mask

	// ByteSliceFromString also rejects embedded NUL bytes and appends
	// the terminating NUL the kernel expects.
	typ, err := ByteSliceFromString(sa.Type)
	if err != nil {
		return nil, 0, err
	}
	name, err := ByteSliceFromString(sa.Name)
	if err != nil {
		return nil, 0, err
	}

	copy(sa.raw.Type[:], typ)
	copy(sa.raw.Name[:], name)

	return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil
}
// SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets.
// SockaddrVM provides access to Linux VM sockets: a mechanism that enables
// bidirectional communication between a hypervisor and its guest virtual
// machines.
type SockaddrVM struct {
	// CID and Port specify a context ID and port address for a VM socket.
	// Guests have a unique CID, and hosts may have a well-known CID of:
	//  - VMADDR_CID_HYPERVISOR: refers to the hypervisor process.
	//  - VMADDR_CID_HOST: refers to other processes on the host.
	CID  uint32
	Port uint32
	raw  RawSockaddrVM
}
// sockaddr copies sa into its raw kernel representation and returns a
// pointer/length pair suitable for bind(2)/connect(2).
func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_VSOCK
	sa.raw.Port = sa.Port
	sa.raw.Cid = sa.CID

	return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
}
// SockaddrXDP implements the Sockaddr interface for AF_XDP type sockets.
type SockaddrXDP struct {
	Flags        uint16 // XDP_* binding flags
	Ifindex      uint32 // network interface index
	QueueID      uint32 // RX/TX queue to attach to
	SharedUmemFD uint32 // fd of the socket owning the shared UMEM
	raw          RawSockaddrXDP
}
// sockaddr copies sa into its raw kernel representation and returns a
// pointer/length pair suitable for bind(2).
func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_XDP
	sa.raw.Flags = sa.Flags
	sa.raw.Ifindex = sa.Ifindex
	sa.raw.Queue_id = sa.QueueID
	sa.raw.Shared_umem_fd = sa.SharedUmemFD

	return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil
}
// This constant mirrors the #define of PX_PROTO_OE in
// linux/if_pppox.h. We're defining this by hand here instead of
// autogenerating through mkerrors.sh because including
// linux/if_pppox.h causes some declaration conflicts with other
// includes (linux/if_pppox.h includes linux/in.h, which conflicts
// with netinet/in.h). Given that we only need a single zero constant
// out of that file, it's cleaner to just define it by hand here.
// It is used both when marshaling (SockaddrPPPoE.sockaddr) and when
// parsing (anyToSockaddr) AF_PPPOX addresses.
const px_proto_oe = 0
// SockaddrPPPoE implements the Sockaddr interface for AF_PPPOX type
// sockets using the PPPoE (PX_PROTO_OE) protocol.
type SockaddrPPPoE struct {
	SID    uint16           // PPPoE session ID
	Remote net.HardwareAddr // remote peer address; must be exactly 6 bytes
	Dev    string           // network device name; must fit in IFNAMSIZ-1 bytes
	raw    RawSockaddrPPPoX
}
// sockaddr serializes sa into the raw byte-array layout the kernel
// expects for a PPPoE sockaddr_pppox. The layout is assembled by hand
// because the struct mixes alignments and byte orders.
func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if len(sa.Remote) != 6 {
		return nil, 0, EINVAL
	}
	if len(sa.Dev) > IFNAMSIZ-1 {
		return nil, 0, EINVAL
	}

	// Family occupies the first two bytes, in host byte order.
	*(*uint16)(unsafe.Pointer(&sa.raw[0])) = AF_PPPOX
	// This next field is in host-endian byte order. We can't use the
	// same unsafe pointer cast as above, because this value is not
	// 32-bit aligned and some architectures don't allow unaligned
	// access.
	//
	// However, the value of px_proto_oe is 0, so we can use
	// encoding/binary helpers to write the bytes without worrying
	// about the ordering.
	binary.BigEndian.PutUint32(sa.raw[2:6], px_proto_oe)
	// This field is deliberately big-endian, unlike the previous
	// one. The kernel expects SID to be in network byte order.
	binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
	copy(sa.raw[8:14], sa.Remote)
	// Zero the device-name region before copying so a shorter name is
	// properly NUL-terminated/padded.
	for i := 14; i < 14+IFNAMSIZ; i++ {
		sa.raw[i] = 0
	}
	copy(sa.raw[14:], sa.Dev)
	return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
// anyToSockaddr converts a raw sockaddr filled in by the kernel into the
// typed Sockaddr for its address family. fd is needed only for families
// (currently AF_BLUETOOTH) whose decoding depends on the socket's
// protocol. Unknown families return EAFNOSUPPORT.
func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
	switch rsa.Addr.Family {
	case AF_NETLINK:
		pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
		sa := new(SockaddrNetlink)
		sa.Family = pp.Family
		sa.Pad = pp.Pad
		sa.Pid = pp.Pid
		sa.Groups = pp.Groups
		return sa, nil

	case AF_PACKET:
		pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
		sa := new(SockaddrLinklayer)
		sa.Protocol = pp.Protocol
		sa.Ifindex = int(pp.Ifindex)
		sa.Hatype = pp.Hatype
		sa.Pkttype = pp.Pkttype
		sa.Halen = pp.Halen
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_UNIX:
		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
		sa := new(SockaddrUnix)
		if pp.Path[0] == 0 {
			// "Abstract" Unix domain socket.
			// Rewrite leading NUL as @ for textual display.
			// (This is the standard convention.)
			// Not friendly to overwrite in place,
			// but the callers below don't care.
			pp.Path[0] = '@'
		}

		// Assume path ends at NUL.
		// This is not technically the Linux semantics for
		// abstract Unix domain sockets--they are supposed
		// to be uninterpreted fixed-size binary blobs--but
		// everyone uses this convention.
		n := 0
		for n < len(pp.Path) && pp.Path[n] != 0 {
			n++
		}
		// Oversized fixed array cast is just a way to take a []byte
		// view of Path without copying; only the first n bytes are used.
		bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
		sa.Name = string(bytes)
		return sa, nil

	case AF_INET:
		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet4)
		// Port is stored in network byte order; decode explicitly.
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_INET6:
		pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet6)
		// Port is stored in network byte order; decode explicitly.
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		sa.ZoneId = pp.Scope_id
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_VSOCK:
		pp := (*RawSockaddrVM)(unsafe.Pointer(rsa))
		sa := &SockaddrVM{
			CID:  pp.Cid,
			Port: pp.Port,
		}
		return sa, nil
	case AF_BLUETOOTH:
		// The raw layout depends on the bluetooth protocol, so query it.
		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
		if err != nil {
			return nil, err
		}
		// only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections
		switch proto {
		case BTPROTO_L2CAP:
			pp := (*RawSockaddrL2)(unsafe.Pointer(rsa))
			sa := &SockaddrL2{
				PSM:      pp.Psm,
				CID:      pp.Cid,
				Addr:     pp.Bdaddr,
				AddrType: pp.Bdaddr_type,
			}
			return sa, nil
		case BTPROTO_RFCOMM:
			pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa))
			sa := &SockaddrRFCOMM{
				Channel: pp.Channel,
				Addr:    pp.Bdaddr,
			}
			return sa, nil
		}
	case AF_XDP:
		pp := (*RawSockaddrXDP)(unsafe.Pointer(rsa))
		sa := &SockaddrXDP{
			Flags:        pp.Flags,
			Ifindex:      pp.Ifindex,
			QueueID:      pp.Queue_id,
			SharedUmemFD: pp.Shared_umem_fd,
		}
		return sa, nil
	case AF_PPPOX:
		pp := (*RawSockaddrPPPoX)(unsafe.Pointer(rsa))
		// Only the PPPoE (PX_PROTO_OE) flavor of AF_PPPOX is supported.
		if binary.BigEndian.Uint32(pp[2:6]) != px_proto_oe {
			return nil, EINVAL
		}
		sa := &SockaddrPPPoE{
			SID:    binary.BigEndian.Uint16(pp[6:8]),
			Remote: net.HardwareAddr(pp[8:14]),
		}
		// Device name is NUL-terminated within its IFNAMSIZ region.
		for i := 14; i < 14+IFNAMSIZ; i++ {
			if pp[i] == 0 {
				sa.Dev = string(pp[14:i])
				break
			}
		}
		return sa, nil
	}
	return nil, EAFNOSUPPORT
}
// Accept accepts a connection on the listening socket fd, returning the
// new connection's descriptor and the peer's address. If the peer
// address cannot be decoded, the new descriptor is closed and the
// decoding error is returned with nfd == 0.
func Accept(fd int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var rlen _Socklen = SizeofSockaddrAny
	nfd, err = accept(fd, &rsa, &rlen)
	if err != nil {
		return
	}
	// Same sanity check as Accept4: the kernel must never report an
	// address longer than the buffer we handed it.
	if rlen > SizeofSockaddrAny {
		panic("RawSockaddrAny too small")
	}
	sa, err = anyToSockaddr(fd, &rsa)
	if err != nil {
		Close(nfd)
		nfd = 0
	}
	return
}
// Accept4 is like Accept but also accepts accept4(2) flags such as
// SOCK_NONBLOCK and SOCK_CLOEXEC, applied to the returned descriptor.
func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	// rlen deliberately avoids shadowing the builtin len.
	var rlen _Socklen = SizeofSockaddrAny
	nfd, err = accept4(fd, &rsa, &rlen, flags)
	if err != nil {
		return
	}
	// The kernel must never report an address longer than the buffer
	// we handed it; anything else indicates memory corruption.
	if rlen > SizeofSockaddrAny {
		panic("RawSockaddrAny too small")
	}
	sa, err = anyToSockaddr(fd, &rsa)
	if err != nil {
		Close(nfd)
		nfd = 0
	}
	return
}
// Getsockname returns the local address to which the socket fd is bound.
func Getsockname(fd int) (sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	// rlen deliberately avoids shadowing the builtin len.
	var rlen _Socklen = SizeofSockaddrAny
	if err = getsockname(fd, &rsa, &rlen); err != nil {
		return
	}
	return anyToSockaddr(fd, &rsa)
}
// GetsockoptIPMreqn returns the IPMreqn value of the socket option opt
// for the socket associated with fd at the given socket level.
func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
	var value IPMreqn
	vallen := _Socklen(SizeofIPMreqn)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}
// GetsockoptUcred returns the Ucred value of the socket option opt for
// the socket associated with fd at the given socket level (typically
// SOL_SOCKET/SO_PEERCRED).
func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
	var value Ucred
	vallen := _Socklen(SizeofUcred)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}
// GetsockoptTCPInfo returns the TCPInfo value of the socket option opt
// for the socket associated with fd at the given socket level.
func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
	var value TCPInfo
	vallen := _Socklen(SizeofTCPInfo)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level. The returned string
// has the kernel-supplied NUL terminator stripped.
func GetsockoptString(fd, level, opt int) (string, error) {
	buf := make([]byte, 256)
	vallen := _Socklen(len(buf))
	err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
	if err != nil {
		if err == ERANGE && vallen > 0 {
			// The kernel told us the required size; retry once.
			buf = make([]byte, vallen)
			err = getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
		}
		if err != nil {
			return "", err
		}
	}
	if vallen == 0 {
		// Nothing was written; without this guard, vallen-1 would
		// underflow the unsigned _Socklen and panic in the slice below.
		return "", nil
	}
	return string(buf[:vallen-1]), nil
}
// SetsockoptIPMreqn sets the IPMreqn socket option opt for the socket
// associated with fd at the given socket level.
func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
	return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
}
// Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html)
// KeyctlInt calls keyctl commands in which each argument is an int.
// These commands are KEYCTL_REVOKE, KEYCTL_CHOWN, KEYCTL_CLEAR, KEYCTL_LINK,
// KEYCTL_UNLINK, KEYCTL_NEGATE, KEYCTL_SET_REQKEY_KEYRING, KEYCTL_SET_TIMEOUT,
// KEYCTL_ASSUME_AUTHORITY, KEYCTL_SESSION_TO_PARENT, KEYCTL_REJECT,
// KEYCTL_INVALIDATE, and KEYCTL_GET_PERSISTENT.
//sys KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) = SYS_KEYCTL
// KeyctlBuffer calls keyctl commands in which the third and fourth
// arguments are a buffer and its length, respectively.
// These commands are KEYCTL_UPDATE, KEYCTL_READ, and KEYCTL_INSTANTIATE.
//sys KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) = SYS_KEYCTL
// KeyctlString calls keyctl commands which return a string.
// These commands are KEYCTL_DESCRIBE and KEYCTL_GET_SECURITY.
// KeyctlString calls keyctl commands which return a string.
// These commands are KEYCTL_DESCRIBE and KEYCTL_GET_SECURITY.
func KeyctlString(cmd int, id int) (string, error) {
	// We must loop as the string data may change in between the syscalls.
	// We could allocate a large buffer here to reduce the chance that the
	// syscall needs to be called twice; however, this is unnecessary as
	// the performance loss is negligible.
	var buffer []byte
	for {
		// Try to fill the buffer with data
		length, err := KeyctlBuffer(cmd, id, buffer, 0)
		if err != nil {
			return "", err
		}

		// Check if the data was written
		if length <= len(buffer) {
			if length == 0 {
				// Nothing was returned (not even a NUL terminator);
				// without this guard, length-1 below would panic on
				// the first iteration, when buffer is still nil.
				return "", nil
			}
			// Exclude the null terminator
			return string(buffer[:length-1]), nil
		}

		// Make a bigger buffer if needed
		buffer = make([]byte, length)
	}
}
// Keyctl commands with special signatures.
// KeyctlGetKeyringID implements the KEYCTL_GET_KEYRING_ID command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_get_keyring_ID.3.html
// KeyctlGetKeyringID implements the KEYCTL_GET_KEYRING_ID command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_get_keyring_ID.3.html
func KeyctlGetKeyringID(id int, create bool) (ringid int, err error) {
	// The kernel takes the create flag as an int, not a bool.
	var createArg int
	if create {
		createArg = 1
	}
	return KeyctlInt(KEYCTL_GET_KEYRING_ID, id, createArg, 0, 0)
}
// KeyctlSetperm implements the KEYCTL_SETPERM command. The perm value is the
// key handle permission mask as described in the "keyctl setperm" section of
// http://man7.org/linux/man-pages/man1/keyctl.1.html.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_setperm.3.html
// KeyctlSetperm implements the KEYCTL_SETPERM command, replacing the
// permission mask on key id with perm. See:
// http://man7.org/linux/man-pages/man3/keyctl_setperm.3.html
func KeyctlSetperm(id int, perm uint32) error {
	_, err := KeyctlInt(KEYCTL_SETPERM, id, int(perm), 0, 0)
	return err
}
//sys keyctlJoin(cmd int, arg2 string) (ret int, err error) = SYS_KEYCTL
// KeyctlJoinSessionKeyring implements the KEYCTL_JOIN_SESSION_KEYRING command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_join_session_keyring.3.html
// KeyctlJoinSessionKeyring implements the KEYCTL_JOIN_SESSION_KEYRING
// command, joining (or creating) the named session keyring and returning
// its ID.
func KeyctlJoinSessionKeyring(name string) (ringid int, err error) {
	return keyctlJoin(KEYCTL_JOIN_SESSION_KEYRING, name)
}
//sys keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) = SYS_KEYCTL
// KeyctlSearch implements the KEYCTL_SEARCH command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_search.3.html
// KeyctlSearch implements the KEYCTL_SEARCH command, recursively searching
// ringid for a key of the given type and description; destRingid, if
// nonzero, receives a link to the found key.
func KeyctlSearch(ringid int, keyType, description string, destRingid int) (id int, err error) {
	return keyctlSearch(KEYCTL_SEARCH, ringid, keyType, description, destRingid)
}
//sys keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) = SYS_KEYCTL
// KeyctlInstantiateIOV implements the KEYCTL_INSTANTIATE_IOV command. This
// command is similar to KEYCTL_INSTANTIATE, except that the payload is a slice
// of Iovec (each of which represents a buffer) instead of a single buffer.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_instantiate_iov.3.html
// KeyctlInstantiateIOV implements the KEYCTL_INSTANTIATE_IOV command. This
// command is similar to KEYCTL_INSTANTIATE, except that the payload is a slice
// of Iovec (each of which represents a buffer) instead of a single buffer.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_instantiate_iov.3.html
func KeyctlInstantiateIOV(id int, payload []Iovec, ringid int) error {
	return keyctlIOV(KEYCTL_INSTANTIATE_IOV, id, payload, ringid)
}
//sys keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) = SYS_KEYCTL
// KeyctlDHCompute implements the KEYCTL_DH_COMPUTE command. This command
// computes a Diffie-Hellman shared secret based on the provide params. The
// secret is written to the provided buffer and the returned size is the number
// of bytes written (returning an error if there is insufficient space in the
// buffer). If a nil buffer is passed in, this function returns the minimum
// buffer length needed to store the appropriate data. Note that this differs
// from KEYCTL_READ's behavior which always returns the requested payload size.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_dh_compute.3.html
// KeyctlDHCompute implements the KEYCTL_DH_COMPUTE command, writing the
// Diffie-Hellman shared secret for params into buffer and returning the
// number of bytes written (or, for a nil buffer, the size required).
func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error) {
	return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer)
}
// Recvmsg receives a message from fd into p, with out-of-band (control)
// data into oob. It returns the number of payload bytes (n), control
// bytes (oobn), the message flags, and the source address (nil when the
// socket is connected).
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
	var msg Msghdr
	var rsa RawSockaddrAny
	msg.Name = (*byte)(unsafe.Pointer(&rsa))
	msg.Namelen = uint32(SizeofSockaddrAny)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = &p[0]
		iov.SetLen(len(p))
	}
	var dummy byte
	if len(oob) > 0 {
		if len(p) == 0 {
			// Control data on a stream socket is only delivered
			// alongside at least one byte of payload, so point the
			// iovec at a one-byte dummy buffer.
			var sockType int
			sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
			if err != nil {
				return
			}
			// receive at least one normal byte
			if sockType != SOCK_DGRAM {
				iov.Base = &dummy
				iov.SetLen(1)
			}
		}
		msg.Control = &oob[0]
		msg.SetControllen(len(oob))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = recvmsg(fd, &msg, flags); err != nil {
		return
	}
	oobn = int(msg.Controllen)
	recvflags = int(msg.Flags)
	// source address is only specified if the socket is unconnected
	if rsa.Addr.Family != AF_UNSPEC {
		from, err = anyToSockaddr(fd, &rsa)
	}
	return
}
// Sendmsg sends a message on fd with payload p, control data oob, and
// destination to (nil for connected sockets). It is SendmsgN with the
// byte count discarded.
func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
	_, err = SendmsgN(fd, p, oob, to, flags)
	return
}
// SendmsgN sends a message on fd with payload p, control data oob, and
// destination to (nil for connected sockets), returning the number of
// payload bytes written. When only control data is sent, n is reported
// as 0 even if a dummy payload byte was transmitted.
func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
	var ptr unsafe.Pointer
	var salen _Socklen
	if to != nil {
		var err error
		ptr, salen, err = to.sockaddr()
		if err != nil {
			return 0, err
		}
	}
	var msg Msghdr
	msg.Name = (*byte)(ptr)
	msg.Namelen = uint32(salen)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = &p[0]
		iov.SetLen(len(p))
	}
	var dummy byte
	if len(oob) > 0 {
		if len(p) == 0 {
			// Control data on a stream socket must accompany at
			// least one byte of payload, so send a dummy byte.
			var sockType int
			sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
			if err != nil {
				return 0, err
			}
			// send at least one normal byte
			if sockType != SOCK_DGRAM {
				iov.Base = &dummy
				iov.SetLen(1)
			}
		}
		msg.Control = &oob[0]
		msg.SetControllen(len(oob))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = sendmsg(fd, &msg, flags); err != nil {
		return 0, err
	}
	if len(oob) > 0 && len(p) == 0 {
		// Hide the dummy byte from the caller's byte count.
		n = 0
	}
	return n, nil
}
// BindToDevice binds the socket associated with fd to device.
// BindToDevice binds the socket associated with fd to device
// (SO_BINDTODEVICE).
func BindToDevice(fd int, device string) (err error) {
	return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
}
//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
// ptracePeek reads len(out) bytes from the tracee pid starting at addr
// using the given PTRACE_PEEK* request, returning the number of bytes
// copied. Reads are performed one aligned machine word at a time.
func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
	// The peek requests are machine-size oriented, so we wrap it
	// to retrieve arbitrary-length data.

	// The ptrace syscall differs from glibc's ptrace.
	// Peeks returns the word in *data, not as the return value.

	var buf [SizeofPtr]byte

	// Leading edge. PEEKTEXT/PEEKDATA don't require aligned
	// access (PEEKUSER warns that it might), but if we don't
	// align our reads, we might straddle an unmapped page
	// boundary and not get the bytes leading up to the page
	// boundary.
	n := 0
	if addr%SizeofPtr != 0 {
		err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return 0, err
		}
		n += copy(out, buf[addr%SizeofPtr:])
		out = out[n:]
	}

	// Remainder.
	for len(out) > 0 {
		// We use an internal buffer to guarantee alignment.
		// It's not documented if this is necessary, but we're paranoid.
		err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return n, err
		}
		copied := copy(out, buf[0:])
		n += copied
		out = out[copied:]
	}

	return n, nil
}
// PtracePeekText reads len(out) bytes of text from the tracee pid at addr.
func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
}

// PtracePeekData reads len(out) bytes of data from the tracee pid at addr.
func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
}

// PtracePeekUser reads len(out) bytes from the tracee pid's USER area at addr.
func PtracePeekUser(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKUSR, pid, addr, out)
}
// ptracePoke writes data into the tracee pid starting at addr using the
// given PTRACE_POKE* request, returning the number of bytes written.
// Unaligned leading/trailing edges are handled by reading the containing
// word with peekReq, merging, and writing it back.
func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
	// As for ptracePeek, we need to align our accesses to deal
	// with the possibility of straddling an invalid page.

	// Leading edge.
	n := 0
	if addr%SizeofPtr != 0 {
		var buf [SizeofPtr]byte
		// Read-modify-write the partially covered word.
		err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return 0, err
		}
		n += copy(buf[addr%SizeofPtr:], data)
		word := *((*uintptr)(unsafe.Pointer(&buf[0])))
		err = ptrace(pokeReq, pid, addr-addr%SizeofPtr, word)
		if err != nil {
			return 0, err
		}
		data = data[n:]
	}

	// Interior.
	for len(data) > SizeofPtr {
		word := *((*uintptr)(unsafe.Pointer(&data[0])))
		err = ptrace(pokeReq, pid, addr+uintptr(n), word)
		if err != nil {
			return n, err
		}
		n += SizeofPtr
		data = data[SizeofPtr:]
	}

	// Trailing edge.
	if len(data) > 0 {
		var buf [SizeofPtr]byte
		// Read-modify-write the final partially covered word.
		err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return n, err
		}
		copy(buf[0:], data)
		word := *((*uintptr)(unsafe.Pointer(&buf[0])))
		err = ptrace(pokeReq, pid, addr+uintptr(n), word)
		if err != nil {
			return n, err
		}
		n += len(data)
	}

	return n, nil
}
// PtracePokeText writes data into the tracee pid's text at addr.
func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
}

// PtracePokeData writes data into the tracee pid's data at addr.
func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
}

// PtracePokeUser writes data into the tracee pid's USER area at addr.
func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data)
}
// PtraceGetRegs fetches the tracee pid's registers into regsout.
func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
}

// PtraceSetRegs sets the tracee pid's registers from regs.
func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
}

// PtraceSetOptions sets the tracee pid's PTRACE_O_* option bits.
func PtraceSetOptions(pid int, options int) (err error) {
	return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
}

// PtraceGetEventMsg retrieves the message for the tracee pid's most
// recent ptrace event.
func PtraceGetEventMsg(pid int) (msg uint, err error) {
	var data _C_long
	err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
	msg = uint(data)
	return
}

// PtraceCont resumes the stopped tracee pid, delivering signal if nonzero.
func PtraceCont(pid int, signal int) (err error) {
	return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
}

// PtraceSyscall resumes the tracee pid, stopping at the next syscall
// entry or exit; signal is delivered if nonzero.
func PtraceSyscall(pid int, signal int) (err error) {
	return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
}

// PtraceSingleStep executes a single instruction in the tracee pid.
func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }

// PtraceAttach attaches to the process pid, making it a tracee.
func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }

// PtraceDetach detaches from the tracee pid.
func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)
// Reboot issues reboot(2) with the given LINUX_REBOOT_CMD_* command and
// the required magic numbers supplied automatically.
func Reboot(cmd int) (err error) {
	return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
}
// ReadDirent reads directory entries from fd into buf; on Linux it is
// implemented via getdents64.
func ReadDirent(fd int, buf []byte) (n int, err error) {
	return Getdents(fd, buf)
}
//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
// Certain file systems get rather angry and EINVAL if you give
// them an empty string of data, rather than NULL.
if data == "" {
return mount(source, target, fstype, flags, nil)
}
datap, err := BytePtrFromString(data)
if err != nil {
return err
}
return mount(source, target, fstype, flags, datap)
}
// Sendfile copies up to count bytes from infd (starting at *offset, or
// the current position when offset is nil) to outfd, returning the
// number of bytes written.
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
	// Notify the race detector that this acts like a write barrier
	// between the two descriptors.
	if raceenabled {
		raceReleaseMerge(unsafe.Pointer(&ioSync))
	}
	return sendfile(outfd, infd, offset, count)
}
// Sendto
// Recvfrom
// Socketpair
/*
* Direct access
*/
//sys Acct(path string) (err error)
//sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error)
//sys Adjtimex(buf *Timex) (state int, err error)
//sys Chdir(path string) (err error)
//sys Chroot(path string) (err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys DeleteModule(name string, flags int) (err error)
//sys Dup(oldfd int) (fd int, err error)
//sys Dup3(oldfd int, newfd int, flags int) (err error)
//sysnb EpollCreate1(flag int) (fd int, err error)
//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2
//sys Exit(code int) = SYS_EXIT_GROUP
//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
//sys Fchdir(fd int) (err error)
//sys Fchmod(fd int, mode uint32) (err error)
//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys Fdatasync(fd int) (err error)
//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error)
//sys FinitModule(fd int, params string, flags int) (err error)
//sys Flistxattr(fd int, dest []byte) (sz int, err error)
//sys Flock(fd int, how int) (err error)
//sys Fremovexattr(fd int, attr string) (err error)
//sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error)
//sys Fsync(fd int) (err error)
//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
//sysnb Getpgid(pid int) (pgid int, err error)
// Getpgrp returns the process group ID of the calling process; it is
// Getpgid(0) with the (impossible for pid 0) error discarded.
func Getpgrp() (pid int) {
	pid, _ = Getpgid(0)
	return
}
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sys Getrandom(buf []byte, flags int) (n int, err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
//sys InitModule(moduleImage []byte, params string) (err error)
//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
//sysnb InotifyInit1(flags int) (fd int, err error)
//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
//sysnb Kill(pid int, sig syscall.Signal) (err error)
//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
//sys Lgetxattr(path string, attr string, dest []byte) (sz int, err error)
//sys Listxattr(path string, dest []byte) (sz int, err error)
//sys Llistxattr(path string, dest []byte) (sz int, err error)
//sys Lremovexattr(path string, attr string) (err error)
//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error)
//sys MemfdCreate(name string, flags int) (fd int, err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
//sys read(fd int, p []byte) (n int, err error)
//sys Removexattr(path string, attr string) (err error)
//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
//sys RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error)
//sys Setdomainname(p []byte) (err error)
//sys Sethostname(p []byte) (err error)
//sysnb Setpgid(pid int, pgid int) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tv *Timeval) (err error)
//sys Setns(fd int, nstype int) (err error)
// issue 1435.
// On linux Setuid and Setgid only affects the current thread, not the process.
// This does not match what most callers expect so we must return an error
// here rather than letting the caller think that the call succeeded.
// Setuid always returns EOPNOTSUPP on Linux; see the note above.
func Setuid(uid int) (err error) {
	return EOPNOTSUPP
}

// Setgid always returns EOPNOTSUPP on Linux, for the same reason as
// Setuid. (The parameter is a group ID, named gid accordingly.)
func Setgid(gid int) (err error) {
	return EOPNOTSUPP
}
//sys Setpriority(which int, who int, prio int) (err error)
//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
//sys Signalfd(fd int, mask *Sigset_t, flags int) = SYS_SIGNALFD4
//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error)
//sys Sync()
//sys Syncfs(fd int) (err error)
//sysnb Sysinfo(info *Sysinfo_t) (err error)
//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error)
//sysnb Times(tms *Tms) (ticks uintptr, err error)
//sysnb Umask(mask int) (oldmask int)
//sysnb Uname(buf *Utsname) (err error)
//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
//sys Unshare(flags int) (err error)
//sys write(fd int, p []byte) (n int, err error)
//sys exitThread(code int) (err error) = SYS_EXIT
//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
// mmap varies by architecture; see syscall_linux_*.go.
//sys munmap(addr uintptr, length uintptr) (err error)
// mapper tracks the []byte slices handed out by Mmap so Munmap can map
// them back to their underlying regions.
var mapper = &mmapper{
	active: make(map[*byte][]byte),
	mmap:   mmap,
	munmap: munmap,
}
// Mmap maps length bytes of fd at offset into memory, returning the
// mapping as a byte slice. Release it with Munmap.
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
	return mapper.Mmap(fd, offset, length, prot, flags)
}

// Munmap releases a mapping previously returned by Mmap.
func Munmap(b []byte) (err error) {
	return mapper.Munmap(b)
}
//sys Madvise(b []byte, advice int) (err error)
//sys Mprotect(b []byte, prot int) (err error)
//sys Mlock(b []byte) (err error)
//sys Mlockall(flags int) (err error)
//sys Msync(b []byte, flags int) (err error)
//sys Munlock(b []byte) (err error)
//sys Munlockall() (err error)
// Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd,
// using the specified flags.
// Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd,
// using the specified flags. It returns the number of bytes spliced.
func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
	var p unsafe.Pointer
	if len(iovs) > 0 {
		p = unsafe.Pointer(&iovs[0])
	}

	n, _, errno := Syscall6(SYS_VMSPLICE, uintptr(fd), uintptr(p), uintptr(len(iovs)), uintptr(flags), 0, 0)
	if errno != 0 {
		return 0, syscall.Errno(errno)
	}

	return int(n), nil
}
//sys faccessat(dirfd int, path string, mode uint32) (err error)
// Faccessat checks the accessibility of path (relative to dirfd) for the
// given access mode (bitmask of R_OK/W_OK/X_OK semantics in the low 3
// bits). Because the raw faccessat syscall takes no flags, the
// AT_SYMLINK_NOFOLLOW and AT_EACCESS flags are emulated in userspace by
// stat'ing the file and checking its permission bits, mirroring glibc.
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
	if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
		return EINVAL
	}

	// The Linux kernel faccessat system call does not take any flags.
	// The glibc faccessat implements the flags itself; see
	// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/faccessat.c;hb=HEAD
	// Because people naturally expect syscall.Faccessat to act
	// like C faccessat, we do the same.

	if flags == 0 {
		return faccessat(dirfd, path, mode)
	}

	var st Stat_t
	if err := Fstatat(dirfd, path, &st, flags&AT_SYMLINK_NOFOLLOW); err != nil {
		return err
	}

	mode &= 7
	if mode == 0 {
		// F_OK: existence was already established by Fstatat.
		return nil
	}

	// Choose effective vs real IDs per AT_EACCESS.
	var uid int
	if flags&AT_EACCESS != 0 {
		uid = Geteuid()
	} else {
		uid = Getuid()
	}

	if uid == 0 {
		if mode&1 == 0 {
			// Root can read and write any file.
			return nil
		}
		if st.Mode&0111 != 0 {
			// Root can execute any file that anybody can execute.
			return nil
		}
		return EACCES
	}

	// Pick the owner/group/other permission triple that applies.
	var fmode uint32
	if uint32(uid) == st.Uid {
		fmode = (st.Mode >> 6) & 7
	} else {
		var gid int
		if flags&AT_EACCESS != 0 {
			gid = Getegid()
		} else {
			gid = Getgid()
		}

		if uint32(gid) == st.Gid {
			fmode = (st.Mode >> 3) & 7
		} else {
			fmode = st.Mode & 7
		}
	}

	if fmode&mode == mode {
		return nil
	}

	return EACCES
}
/*
* Unimplemented
*/
// AfsSyscall
// Alarm
// ArchPrctl
// Brk
// Capget
// Capset
// ClockNanosleep
// ClockSettime
// Clone
// EpollCtlOld
// EpollPwait
// EpollWaitOld
// Execve
// Fork
// Futex
// GetKernelSyms
// GetMempolicy
// GetRobustList
// GetThreadArea
// Getitimer
// Getpmsg
// IoCancel
// IoDestroy
// IoGetevents
// IoSetup
// IoSubmit
// IoprioGet
// IoprioSet
// KexecLoad
// LookupDcookie
// Mbind
// MigratePages
// Mincore
// ModifyLdt
// Mount
// MovePages
// MqGetsetattr
// MqNotify
// MqOpen
// MqTimedreceive
// MqTimedsend
// MqUnlink
// Mremap
// Msgctl
// Msgget
// Msgrcv
// Msgsnd
// Nfsservctl
// Personality
// Pselect6
// Ptrace
// Putpmsg
// Quotactl
// Readahead
// Readv
// RemapFilePages
// RestartSyscall
// RtSigaction
// RtSigpending
// RtSigprocmask
// RtSigqueueinfo
// RtSigreturn
// RtSigsuspend
// RtSigtimedwait
// SchedGetPriorityMax
// SchedGetPriorityMin
// SchedGetparam
// SchedGetscheduler
// SchedRrGetInterval
// SchedSetparam
// SchedYield
// Security
// Semctl
// Semget
// Semop
// Semtimedop
// SetMempolicy
// SetRobustList
// SetThreadArea
// SetTidAddress
// Shmat
// Shmctl
// Shmdt
// Shmget
// Sigaltstack
// Swapoff
// Swapon
// Sysfs
// TimerCreate
// TimerDelete
// TimerGetoverrun
// TimerGettime
// TimerSettime
// Timerfd
// Tkill (obsolete)
// Tuxcall
// Umount2
// Uselib
// Utimensat
// Vfork
// Vhangup
// Vserver
// Waitid
// _Sysctl
unix: add SetsockoptSockFprog on Linux for attaching BPF filters
Adding this helper makes it possible to eliminate some awkward code
that invokes unix.Syscall6 directly and that carries 32-bit shims for
invoking setsockopt correctly on 32-bit Linux.
Change-Id: I53ef411e4c96cef557999261760cddc00df58d8a
Reviewed-on: https://go-review.googlesource.com/c/163303
Run-TryBot: Matt Layher <76528ff2e917253aa2211f68e4ca60b5b846dcf5@gmail.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Tobias Klauser <0a68dd4915066ec5d3f81f75a828fee53dcc8822@gmail.com>
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Linux system calls.
// This file is compiled as ordinary Go code,
// but it is also input to mksyscall,
// which parses the //sys lines and generates system call stubs.
// Note that sometimes we use a lowercase //sys name and
// wrap it in our own nicer implementation.
package unix
import (
"encoding/binary"
"net"
"runtime"
"syscall"
"unsafe"
)
/*
 * Wrapped
 */

// Access checks accessibility of path relative to the current working
// directory; equivalent to Faccessat with AT_FDCWD and no flags.
func Access(path string, mode uint32) (err error) {
	return Faccessat(AT_FDCWD, path, mode, 0)
}

// Chmod changes the mode of the named file via Fchmodat with AT_FDCWD.
func Chmod(path string, mode uint32) (err error) {
	return Fchmodat(AT_FDCWD, path, mode, 0)
}

// Chown changes the owner and group of the named file via Fchownat
// with AT_FDCWD.
func Chown(path string, uid int, gid int) (err error) {
	return Fchownat(AT_FDCWD, path, uid, gid, 0)
}

// Creat creates (or truncates) the named file with the given mode and
// returns a write-only descriptor, like creat(2).
func Creat(path string, mode uint32) (fd int, err error) {
	return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
}
//sys fchmodat(dirfd int, path string, mode uint32) (err error)

// Fchmodat changes the mode of the file at path relative to dirfd.
// Only a zero flags value is supported; AT_SYMLINK_NOFOLLOW yields
// EOPNOTSUPP and any other flag yields EINVAL.
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
	// Linux fchmodat doesn't support the flags parameter. Mimic glibc's behavior
	// and check the flags. Otherwise the mode would be applied to the symlink
	// destination which is not what the user expects.
	if flags&^AT_SYMLINK_NOFOLLOW != 0 {
		return EINVAL
	} else if flags&AT_SYMLINK_NOFOLLOW != 0 {
		return EOPNOTSUPP
	}
	return fchmodat(dirfd, path, mode)
}
//sys ioctl(fd int, req uint, arg uintptr) (err error)

// ioctl itself should not be exposed directly, but additional get/set
// functions for specific types are permissible.

// IoctlSetPointerInt performs an ioctl operation which sets an
// integer value on fd, using the specified request number. The ioctl
// argument is called with a pointer to the integer value, rather than
// passing the integer value directly.
func IoctlSetPointerInt(fd int, req uint, value int) error {
	// The kernel reads a 32-bit C int through the pointer.
	v := int32(value)
	return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
}

// IoctlSetInt performs an ioctl operation which sets an integer value
// on fd, using the specified request number. The value is passed in
// the argument word itself, not through a pointer.
func IoctlSetInt(fd int, req uint, value int) error {
	return ioctl(fd, req, uintptr(value))
}

// ioctlSetWinsize sets the terminal window size on fd.
func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}

// ioctlSetTermios sets the terminal attributes on fd.
func ioctlSetTermios(fd int, req uint, value *Termios) error {
	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}

// IoctlSetRTCTime sets the real-time clock referred to by fd to value.
func IoctlSetRTCTime(fd int, value *RTCTime) error {
	err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value)))
	// Keep value reachable until the ioctl has completed; the pointer
	// passed as uintptr is invisible to the garbage collector.
	runtime.KeepAlive(value)
	return err
}
// IoctlGetInt performs an ioctl operation which gets an integer value
// from fd, using the specified request number.
func IoctlGetInt(fd int, req uint) (int, error) {
	var value int
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return value, err
}

// IoctlGetWinsize returns the terminal window size for fd.
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
	var value Winsize
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return &value, err
}

// IoctlGetTermios returns the terminal attributes for fd.
func IoctlGetTermios(fd int, req uint) (*Termios, error) {
	var value Termios
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return &value, err
}

// IoctlGetRTCTime returns the current time from the real-time clock
// referred to by fd.
func IoctlGetRTCTime(fd int) (*RTCTime, error) {
	var value RTCTime
	err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value)))
	return &value, err
}
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)

// Link creates newpath as a hard link to oldpath, like link(2).
func Link(oldpath string, newpath string) (err error) {
	return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
}

// Mkdir creates a directory with the given mode, like mkdir(2).
func Mkdir(path string, mode uint32) (err error) {
	return Mkdirat(AT_FDCWD, path, mode)
}

// Mknod creates a filesystem node with the given mode and device
// number, like mknod(2).
func Mknod(path string, mode uint32, dev int) (err error) {
	return Mknodat(AT_FDCWD, path, mode, dev)
}

// Open opens the named file. O_LARGEFILE is always added so that
// large files can be handled on 32-bit platforms.
func Open(path string, mode int, perm uint32) (fd int, err error) {
	return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm)
}

//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)

// Openat opens the file at path relative to dirfd, like openat(2).
// As with Open, O_LARGEFILE is always added to the flags.
func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
	return openat(dirfd, path, flags|O_LARGEFILE, mode)
}
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)

// Ppoll waits for events on the given file descriptors with an
// optional timeout and temporary signal mask, like ppoll(2).
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
	if len(fds) == 0 {
		// Avoid taking the address of an element of an empty slice.
		return ppoll(nil, 0, timeout, sigmask)
	}
	return ppoll(&fds[0], len(fds), timeout, sigmask)
}

//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)

// Readlink reads the target of the symbolic link path into buf and
// returns the number of bytes written, like readlink(2).
func Readlink(path string, buf []byte) (n int, err error) {
	return Readlinkat(AT_FDCWD, path, buf)
}

// Rename renames (moves) oldpath to newpath, like rename(2).
func Rename(oldpath string, newpath string) (err error) {
	return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath)
}

// Rmdir removes the named empty directory, like rmdir(2).
func Rmdir(path string) error {
	return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
}

//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error)

// Symlink creates newpath as a symbolic link to oldpath, like symlink(2).
func Symlink(oldpath string, newpath string) (err error) {
	return Symlinkat(oldpath, AT_FDCWD, newpath)
}

// Unlink removes the named file, like unlink(2).
func Unlink(path string) error {
	return Unlinkat(AT_FDCWD, path, 0)
}
//sys Unlinkat(dirfd int, path string, flags int) (err error)

// Utimes sets the access and modification times of path. tv must be
// nil or contain exactly two Timevals: access time then modification
// time. utimensat is preferred; on ENOSYS (old kernels) it falls back
// to the utimes syscall.
func Utimes(path string, tv []Timeval) error {
	if tv == nil {
		err := utimensat(AT_FDCWD, path, nil, 0)
		if err != ENOSYS {
			return err
		}
		return utimes(path, nil)
	}
	if len(tv) != 2 {
		return EINVAL
	}
	// Convert to Timespecs for utimensat.
	var ts [2]Timespec
	ts[0] = NsecToTimespec(TimevalToNsec(tv[0]))
	ts[1] = NsecToTimespec(TimevalToNsec(tv[1]))
	err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
	if err != ENOSYS {
		return err
	}
	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)

// UtimesNano sets the access and modification times of path with
// nanosecond precision. ts must be nil or contain exactly two
// Timespecs: access time then modification time.
func UtimesNano(path string, ts []Timespec) error {
	if ts == nil {
		err := utimensat(AT_FDCWD, path, nil, 0)
		if err != ENOSYS {
			return err
		}
		return utimes(path, nil)
	}
	if len(ts) != 2 {
		return EINVAL
	}
	err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
	if err != ENOSYS {
		return err
	}
	// If the utimensat syscall isn't available (utimensat was added to Linux
	// in 2.6.22, Released, 8 July 2007) then fall back to utimes
	var tv [2]Timeval
	for i := 0; i < 2; i++ {
		tv[i] = NsecToTimeval(TimespecToNsec(ts[i]))
	}
	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}
// UtimesNanoAt sets the access and modification times of the file at
// path relative to dirfd, with nanosecond precision. ts must be nil
// or contain exactly two Timespecs.
func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
	if ts == nil {
		return utimensat(dirfd, path, nil, flags)
	}
	if len(ts) != 2 {
		return EINVAL
	}
	return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
}

// Futimesat sets the access and modification times of the file at
// path relative to dirfd. tv must be nil or contain exactly two
// Timevals.
func Futimesat(dirfd int, path string, tv []Timeval) error {
	if tv == nil {
		return futimesat(dirfd, path, nil)
	}
	if len(tv) != 2 {
		return EINVAL
	}
	return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}

// Futimes sets the access and modification times of the file referred
// to by fd, going through its /proc/self/fd entry.
func Futimes(fd int, tv []Timeval) (err error) {
	// Believe it or not, this is the best we can do on Linux
	// (and is what glibc does).
	return Utimes("/proc/self/fd/"+itoa(fd), tv)
}
// ImplementsGetwd indicates this package provides its own Getwd.
const ImplementsGetwd = true

//sys Getcwd(buf []byte) (n int, err error)

// Getwd returns the current working directory, like getcwd(2).
func Getwd() (wd string, err error) {
	var buf [PathMax]byte
	n, err := Getcwd(buf[0:])
	if err != nil {
		return "", err
	}
	// Getcwd returns the number of bytes written to buf, including the NUL.
	if n < 1 || n > len(buf) || buf[n-1] != 0 {
		return "", EINVAL
	}
	// Strip the trailing NUL.
	return string(buf[0 : n-1]), nil
}
// Getgroups returns the supplementary group IDs of the calling
// process, like getgroups(2).
func Getgroups() (gids []int, err error) {
	// First call with a zero count to learn how many groups there are.
	count, err := getgroups(0, nil)
	if err != nil {
		return nil, err
	}
	if count == 0 {
		return nil, nil
	}
	// Sanity check group count. Max is 1<<16 on Linux.
	if count < 0 || count > 1<<20 {
		return nil, EINVAL
	}
	raw := make([]_Gid_t, count)
	count, err = getgroups(count, &raw[0])
	if err != nil {
		return nil, err
	}
	// Widen the kernel's group IDs to plain ints.
	gids = make([]int, count)
	for i, g := range raw[:count] {
		gids[i] = int(g)
	}
	return gids, nil
}
// Setgroups sets the supplementary group IDs of the calling process,
// like setgroups(2). An empty slice clears all supplementary groups.
func Setgroups(gids []int) (err error) {
	if len(gids) == 0 {
		return setgroups(0, nil)
	}
	// Narrow the ints to the kernel's group ID type.
	buf := make([]_Gid_t, 0, len(gids))
	for _, g := range gids {
		buf = append(buf, _Gid_t(g))
	}
	return setgroups(len(buf), &buf[0])
}
// WaitStatus is the wait status reported by Wait4.
type WaitStatus uint32

// Wait status is 7 bits at bottom, either 0 (exited),
// 0x7F (stopped), or a signal number that caused an exit.
// The 0x80 bit is whether there was a core dump.
// An extra number (exit code, signal causing a stop)
// is in the high bits. At least that's the idea.
// There are various irregularities. For example, the
// "continued" status is 0xFFFF, distinguishing itself
// from stopped via the core dump bit.

const (
	mask    = 0x7F
	core    = 0x80
	exited  = 0x00
	stopped = 0x7F
	shift   = 8
)

// Exited reports whether the process exited normally.
func (w WaitStatus) Exited() bool { return w&mask == exited }

// Signaled reports whether the process was terminated by a signal.
func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }

// Stopped reports whether the process is currently stopped.
func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }

// Continued reports whether the process was resumed after a stop.
func (w WaitStatus) Continued() bool { return w == 0xFFFF }

// CoreDump reports whether a signaled process also dumped core.
func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }

// ExitStatus returns the exit code of an exited process, or -1 if the
// process did not exit normally.
func (w WaitStatus) ExitStatus() int {
	if !w.Exited() {
		return -1
	}
	return int(w>>shift) & 0xFF
}
// Signal returns the signal that terminated the process, or -1 if the
// process was not terminated by a signal.
func (w WaitStatus) Signal() syscall.Signal {
	if !w.Signaled() {
		return -1
	}
	return syscall.Signal(w & mask)
}

// StopSignal returns the signal that stopped the process, or -1 if
// the process is not stopped.
func (w WaitStatus) StopSignal() syscall.Signal {
	if !w.Stopped() {
		return -1
	}
	return syscall.Signal(w>>shift) & 0xFF
}

// TrapCause returns the ptrace event number encoded above a SIGTRAP
// stop signal, or -1 if the process is not stopped by SIGTRAP.
func (w WaitStatus) TrapCause() int {
	if w.StopSignal() != SIGTRAP {
		return -1
	}
	return int(w>>shift) >> 8
}
//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)

// Wait4 waits for state changes in a child of the calling process,
// like wait4(2). If wstatus is non-nil it receives the wait status.
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
	var status _C_int
	wpid, err = wait4(pid, &status, options, rusage)
	if wstatus != nil {
		*wstatus = WaitStatus(status)
	}
	return
}
// Mkfifo creates a named pipe (FIFO) with the given mode.
func Mkfifo(path string, mode uint32) error {
	return Mknod(path, mode|S_IFIFO, 0)
}

// Mkfifoat creates a named pipe (FIFO) at path relative to dirfd.
func Mkfifoat(dirfd int, path string, mode uint32) error {
	return Mknodat(dirfd, path, mode|S_IFIFO, 0)
}
// sockaddr encodes sa into its raw kernel representation and returns
// a pointer to it together with its length, for use in socket calls.
func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if sa.Port < 0 || sa.Port > 0xFFFF {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_INET
	// The kernel expects the port in network (big-endian) byte order.
	port := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
	port[0] = byte(sa.Port >> 8)
	port[1] = byte(sa.Port)
	for i, b := range sa.Addr {
		sa.raw.Addr[i] = b
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
}
// sockaddr encodes sa into its raw kernel representation and returns
// a pointer to it together with its length, for use in socket calls.
func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if sa.Port < 0 || sa.Port > 0xFFFF {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_INET6
	// The kernel expects the port in network (big-endian) byte order.
	port := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
	port[0] = byte(sa.Port >> 8)
	port[1] = byte(sa.Port)
	sa.raw.Scope_id = sa.ZoneId
	for i, b := range sa.Addr {
		sa.raw.Addr[i] = b
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
}
// sockaddr encodes sa into the raw kernel form for AF_UNIX sockets.
// A name starting with '@' denotes an abstract socket address; the
// '@' is rewritten to the leading NUL the kernel expects.
func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
	name := sa.Name
	n := len(name)
	if n >= len(sa.raw.Path) {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_UNIX
	for i := 0; i < n; i++ {
		sa.raw.Path[i] = int8(name[i])
	}
	// length is family (uint16), name, NUL.
	sl := _Socklen(2)
	if n > 0 {
		sl += _Socklen(n) + 1
	}
	if sa.raw.Path[0] == '@' {
		sa.raw.Path[0] = 0
		// Don't count trailing NUL for abstract address.
		sl--
	}
	return unsafe.Pointer(&sa.raw), sl, nil
}
// SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets.
// See packet(7) for the meaning of the fields.
type SockaddrLinklayer struct {
	Protocol uint16
	Ifindex  int
	Hatype   uint16
	Pkttype  uint8
	Halen    uint8
	Addr     [8]byte
	raw      RawSockaddrLinklayer
}

// sockaddr encodes sa into its raw kernel representation, validating
// that Ifindex fits in the kernel's 32-bit field.
func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_PACKET
	sa.raw.Protocol = sa.Protocol
	sa.raw.Ifindex = int32(sa.Ifindex)
	sa.raw.Hatype = sa.Hatype
	sa.raw.Pkttype = sa.Pkttype
	sa.raw.Halen = sa.Halen
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Addr[i] = sa.Addr[i]
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
}
// SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets.
// See netlink(7) for the meaning of the fields.
type SockaddrNetlink struct {
	Family uint16
	Pad    uint16
	Pid    uint32
	Groups uint32
	raw    RawSockaddrNetlink
}

// sockaddr encodes sa into its raw kernel representation; the Family
// field is always forced to AF_NETLINK.
func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_NETLINK
	sa.raw.Pad = sa.Pad
	sa.raw.Pid = sa.Pid
	sa.raw.Groups = sa.Groups
	return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
}
// SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the HCI protocol.
type SockaddrHCI struct {
	Dev     uint16
	Channel uint16
	raw     RawSockaddrHCI
}

// sockaddr encodes sa into its raw kernel representation.
func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	sa.raw.Dev = sa.Dev
	sa.raw.Channel = sa.Channel
	return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
}
// SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the L2CAP protocol.
type SockaddrL2 struct {
	PSM      uint16
	CID      uint16
	Addr     [6]uint8
	AddrType uint8
	raw      RawSockaddrL2
}

// sockaddr encodes sa into its raw kernel representation. PSM and CID
// are written low byte first (little-endian) and the bluetooth address
// is stored byte-reversed.
func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
	psm[0] = byte(sa.PSM)
	psm[1] = byte(sa.PSM >> 8)
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
	}
	cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
	cid[0] = byte(sa.CID)
	cid[1] = byte(sa.CID >> 8)
	sa.raw.Bdaddr_type = sa.AddrType
	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
}
// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets
// using the RFCOMM protocol.
//
// Server example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
// })
// _ = Listen(fd, 1)
// nfd, sa, _ := Accept(fd)
// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
// Read(nfd, buf)
//
// Client example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = Connect(fd, &SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
// })
// Write(fd, []byte(`hello`))
type SockaddrRFCOMM struct {
	// Addr represents a bluetooth address, byte ordering is little-endian.
	Addr [6]uint8

	// Channel is a designated bluetooth channel, only 1-30 are available for use.
	// Since Linux 2.6.7 and further zero value is the first available channel.
	Channel uint8

	// raw is the kernel-level representation filled in by sockaddr.
	raw RawSockaddrRFCOMM
}

// sockaddr encodes sa into its raw kernel representation.
func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_BLUETOOTH
	sa.raw.Channel = sa.Channel
	sa.raw.Bdaddr = sa.Addr
	return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil
}
// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
// The RxID and TxID fields are used for transport protocol addressing in
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning.
//
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
type SockaddrCAN struct {
	Ifindex int
	RxID    uint32
	TxID    uint32
	raw     RawSockaddrCAN
}

// sockaddr encodes sa into its raw kernel representation, validating
// that Ifindex fits in the kernel's 32-bit field. RxID and TxID are
// copied byte-for-byte in the host's native byte order.
func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
		return nil, 0, EINVAL
	}
	sa.raw.Family = AF_CAN
	sa.raw.Ifindex = int32(sa.Ifindex)
	rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
	for i := 0; i < 4; i++ {
		sa.raw.Addr[i] = rx[i]
	}
	tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
	for i := 0; i < 4; i++ {
		sa.raw.Addr[i+4] = tx[i]
	}
	return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
}
// SockaddrALG implements the Sockaddr interface for AF_ALG type sockets.
// SockaddrALG enables userspace access to the Linux kernel's cryptography
// subsystem. The Type and Name fields specify which type of hash or cipher
// should be used with a given socket.
//
// To create a file descriptor that provides access to a hash or cipher, both
// Bind and Accept must be used. Once the setup process is complete, input
// data can be written to the socket, processed by the kernel, and then read
// back as hash output or ciphertext.
//
// Here is an example of using an AF_ALG socket with SHA1 hashing.
// The initial socket setup process is as follows:
//
// // Open a socket to perform SHA1 hashing.
// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
// unix.Bind(fd, addr)
// // Note: unix.Accept does not work at this time; must invoke accept()
// // manually using unix.Syscall.
// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
//
// Once a file descriptor has been returned from Accept, it may be used to
// perform SHA1 hashing. The descriptor is not safe for concurrent use, but
// may be re-used repeatedly with subsequent Write and Read operations.
//
// When hashing a small byte slice or string, a single Write and Read may
// be used:
//
// // Assume hashfd is already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash an input string and read the results. Each Write discards
// // previous hash state. Read always reads the current state.
// b := make([]byte, 20)
// for i := 0; i < 2; i++ {
// io.WriteString(hash, "Hello, world.")
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// }
// // Output:
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
//
// For hashing larger byte slices, or byte streams such as those read from
// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update
// the hash digest instead of creating a new one for a given chunk and finalizing it.
//
// // Assume hashfd and addr are already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash the contents of a file.
// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
// b := make([]byte, 4096)
// for {
// n, err := f.Read(b)
// if err == io.EOF {
// break
// }
// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
// }
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
//
// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html.
type SockaddrALG struct {
	Type    string
	Name    string
	Feature uint32
	Mask    uint32
	raw     RawSockaddrALG
}

// sockaddr encodes sa into its raw kernel representation. Type and
// Name must fit, NUL-terminated, into the kernel's fixed-size fields
// (hence the 13- and 63-byte limits).
func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) {
	// Leave room for NUL byte terminator.
	if len(sa.Type) > 13 {
		return nil, 0, EINVAL
	}
	if len(sa.Name) > 63 {
		return nil, 0, EINVAL
	}

	sa.raw.Family = AF_ALG
	sa.raw.Feat = sa.Feature
	sa.raw.Mask = sa.Mask

	typ, err := ByteSliceFromString(sa.Type)
	if err != nil {
		return nil, 0, err
	}
	name, err := ByteSliceFromString(sa.Name)
	if err != nil {
		return nil, 0, err
	}

	copy(sa.raw.Type[:], typ)
	copy(sa.raw.Name[:], name)

	return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil
}
// SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets.
// SockaddrVM provides access to Linux VM sockets: a mechanism that enables
// bidirectional communication between a hypervisor and its guest virtual
// machines.
type SockaddrVM struct {
	// CID and Port specify a context ID and port address for a VM socket.
	// Guests have a unique CID, and hosts may have a well-known CID of:
	//  - VMADDR_CID_HYPERVISOR: refers to the hypervisor process.
	//  - VMADDR_CID_HOST: refers to other processes on the host.
	CID  uint32
	Port uint32
	raw  RawSockaddrVM
}

// sockaddr encodes sa into its raw kernel representation.
func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_VSOCK
	sa.raw.Port = sa.Port
	sa.raw.Cid = sa.CID

	return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
}
// SockaddrXDP implements the Sockaddr interface for AF_XDP type sockets.
type SockaddrXDP struct {
	Flags        uint16
	Ifindex      uint32
	QueueID      uint32
	SharedUmemFD uint32
	raw          RawSockaddrXDP
}

// sockaddr encodes sa into its raw kernel representation.
func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) {
	sa.raw.Family = AF_XDP
	sa.raw.Flags = sa.Flags
	sa.raw.Ifindex = sa.Ifindex
	sa.raw.Queue_id = sa.QueueID
	sa.raw.Shared_umem_fd = sa.SharedUmemFD

	return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil
}
// This constant mirrors the #define of PX_PROTO_OE in
// linux/if_pppox.h. We're defining this by hand here instead of
// autogenerating through mkerrors.sh because including
// linux/if_pppox.h causes some declaration conflicts with other
// includes (linux/if_pppox.h includes linux/in.h, which conflicts
// with netinet/in.h). Given that we only need a single zero constant
// out of that file, it's cleaner to just define it by hand here.
const px_proto_oe = 0

// SockaddrPPPoE implements the Sockaddr interface for AF_PPPOX
// sockets using the PPPoE (PX_PROTO_OE) protocol.
type SockaddrPPPoE struct {
	SID    uint16
	Remote net.HardwareAddr
	Dev    string
	raw    RawSockaddrPPPoX
}
// sockaddr encodes sa into its raw kernel representation. Remote must
// be a 6-byte hardware address and Dev must fit in IFNAMSIZ-1 bytes.
func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
	if len(sa.Remote) != 6 {
		return nil, 0, EINVAL
	}
	if len(sa.Dev) > IFNAMSIZ-1 {
		return nil, 0, EINVAL
	}

	*(*uint16)(unsafe.Pointer(&sa.raw[0])) = AF_PPPOX
	// This next field is in host-endian byte order. We can't use the
	// same unsafe pointer cast as above, because this value is not
	// 32-bit aligned and some architectures don't allow unaligned
	// access.
	//
	// However, the value of px_proto_oe is 0, so we can use
	// encoding/binary helpers to write the bytes without worrying
	// about the ordering.
	binary.BigEndian.PutUint32(sa.raw[2:6], px_proto_oe)
	// This field is deliberately big-endian, unlike the previous
	// one. The kernel expects SID to be in network byte order.
	binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
	copy(sa.raw[8:14], sa.Remote)
	// Zero the device-name field before copying so short names are
	// NUL-padded.
	for i := 14; i < 14+IFNAMSIZ; i++ {
		sa.raw[i] = 0
	}
	copy(sa.raw[14:], sa.Dev)
	return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
// anyToSockaddr converts a kernel-filled RawSockaddrAny into the typed
// Sockaddr for its address family. fd is consulted only for
// AF_BLUETOOTH, where the socket protocol selects the concrete type.
// Unsupported families return EAFNOSUPPORT.
func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
	switch rsa.Addr.Family {
	case AF_NETLINK:
		pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
		sa := new(SockaddrNetlink)
		sa.Family = pp.Family
		sa.Pad = pp.Pad
		sa.Pid = pp.Pid
		sa.Groups = pp.Groups
		return sa, nil

	case AF_PACKET:
		pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
		sa := new(SockaddrLinklayer)
		sa.Protocol = pp.Protocol
		sa.Ifindex = int(pp.Ifindex)
		sa.Hatype = pp.Hatype
		sa.Pkttype = pp.Pkttype
		sa.Halen = pp.Halen
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_UNIX:
		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
		sa := new(SockaddrUnix)
		if pp.Path[0] == 0 {
			// "Abstract" Unix domain socket.
			// Rewrite leading NUL as @ for textual display.
			// (This is the standard convention.)
			// Not friendly to overwrite in place,
			// but the callers below don't care.
			pp.Path[0] = '@'
		}

		// Assume path ends at NUL.
		// This is not technically the Linux semantics for
		// abstract Unix domain sockets--they are supposed
		// to be uninterpreted fixed-size binary blobs--but
		// everyone uses this convention.
		n := 0
		for n < len(pp.Path) && pp.Path[n] != 0 {
			n++
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
		sa.Name = string(bytes)
		return sa, nil

	case AF_INET:
		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet4)
		// Decode the port from network (big-endian) byte order.
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_INET6:
		pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet6)
		// Decode the port from network (big-endian) byte order.
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		sa.ZoneId = pp.Scope_id
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_VSOCK:
		pp := (*RawSockaddrVM)(unsafe.Pointer(rsa))
		sa := &SockaddrVM{
			CID:  pp.Cid,
			Port: pp.Port,
		}
		return sa, nil

	case AF_BLUETOOTH:
		// The raw layout depends on the bluetooth protocol, so query it.
		proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL)
		if err != nil {
			return nil, err
		}
		// only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections
		switch proto {
		case BTPROTO_L2CAP:
			pp := (*RawSockaddrL2)(unsafe.Pointer(rsa))
			sa := &SockaddrL2{
				PSM:      pp.Psm,
				CID:      pp.Cid,
				Addr:     pp.Bdaddr,
				AddrType: pp.Bdaddr_type,
			}
			return sa, nil
		case BTPROTO_RFCOMM:
			pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa))
			sa := &SockaddrRFCOMM{
				Channel: pp.Channel,
				Addr:    pp.Bdaddr,
			}
			return sa, nil
		}

	case AF_XDP:
		pp := (*RawSockaddrXDP)(unsafe.Pointer(rsa))
		sa := &SockaddrXDP{
			Flags:        pp.Flags,
			Ifindex:      pp.Ifindex,
			QueueID:      pp.Queue_id,
			SharedUmemFD: pp.Shared_umem_fd,
		}
		return sa, nil

	case AF_PPPOX:
		pp := (*RawSockaddrPPPoX)(unsafe.Pointer(rsa))
		if binary.BigEndian.Uint32(pp[2:6]) != px_proto_oe {
			return nil, EINVAL
		}
		sa := &SockaddrPPPoE{
			SID:    binary.BigEndian.Uint16(pp[6:8]),
			Remote: net.HardwareAddr(pp[8:14]),
		}
		// The device name is NUL-terminated within its fixed-size field.
		for i := 14; i < 14+IFNAMSIZ; i++ {
			if pp[i] == 0 {
				sa.Dev = string(pp[14:i])
				break
			}
		}
		return sa, nil
	}
	return nil, EAFNOSUPPORT
}
// Accept accepts a connection on fd and returns the new descriptor
// together with the peer address, like accept(2).
func Accept(fd int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	nfd, err = accept(fd, &rsa, &len)
	if err != nil {
		return
	}
	sa, err = anyToSockaddr(fd, &rsa)
	if err != nil {
		// Don't leak the new descriptor if the address can't be decoded.
		Close(nfd)
		nfd = 0
	}
	return
}
// Accept4 accepts a connection on fd, applying the given accept4(2)
// flags (e.g. SOCK_NONBLOCK, SOCK_CLOEXEC) to the new descriptor.
func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	nfd, err = accept4(fd, &rsa, &len, flags)
	if err != nil {
		return
	}
	// The kernel reporting a larger address than RawSockaddrAny would
	// mean it wrote past our buffer; that is a programmer error.
	if len > SizeofSockaddrAny {
		panic("RawSockaddrAny too small")
	}
	sa, err = anyToSockaddr(fd, &rsa)
	if err != nil {
		// Don't leak the new descriptor if the address can't be decoded.
		Close(nfd)
		nfd = 0
	}
	return
}
// Getsockname returns the local address of the socket fd, like
// getsockname(2).
func Getsockname(fd int) (sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	if err = getsockname(fd, &rsa, &len); err != nil {
		return
	}
	return anyToSockaddr(fd, &rsa)
}
// GetsockoptIPMreqn returns the IPMreqn value of the socket option
// opt for the socket associated with fd at the given socket level.
func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
	var value IPMreqn
	vallen := _Socklen(SizeofIPMreqn)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

// GetsockoptUcred returns the Ucred (peer credentials) value of the
// socket option opt for the socket associated with fd.
func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
	var value Ucred
	vallen := _Socklen(SizeofUcred)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}

// GetsockoptTCPInfo returns the TCPInfo value of the socket option
// opt for the socket associated with fd.
func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
	var value TCPInfo
	vallen := _Socklen(SizeofTCPInfo)
	err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
	return &value, err
}
// GetsockoptString returns the string value of the socket option opt
// for the socket associated with fd at the given socket level. The
// trailing NUL terminator reported by the kernel is stripped.
func GetsockoptString(fd, level, opt int) (string, error) {
	buf := make([]byte, 256)
	vallen := _Socklen(len(buf))
	err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
	if err != nil {
		if err == ERANGE {
			// The value is longer than 256 bytes; retry with the size
			// the kernel reported.
			buf = make([]byte, vallen)
			err = getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen)
		}
		if err != nil {
			return "", err
		}
	}
	if vallen == 0 {
		// Nothing was written; without this guard buf[:vallen-1]
		// below would panic with a negative slice bound.
		return "", nil
	}
	return string(buf[:vallen-1]), nil
}
// SetsockoptIPMreqn sets the IPMreqn value of the socket option opt
// for the socket associated with fd at the given socket level.
func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
	return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
}

// SetsockoptSockFprog attaches a classic BPF or an extended BPF program to a
// socket to filter incoming packets. See 'man 7 socket' for usage information.
func SetsockoptSockFprog(fd, level, opt int, fprog *SockFprog) error {
	return setsockopt(fd, level, opt, unsafe.Pointer(fprog), unsafe.Sizeof(*fprog))
}
// Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html)
// KeyctlInt calls keyctl commands in which each argument is an int.
// These commands are KEYCTL_REVOKE, KEYCTL_CHOWN, KEYCTL_CLEAR, KEYCTL_LINK,
// KEYCTL_UNLINK, KEYCTL_NEGATE, KEYCTL_SET_REQKEY_KEYRING, KEYCTL_SET_TIMEOUT,
// KEYCTL_ASSUME_AUTHORITY, KEYCTL_SESSION_TO_PARENT, KEYCTL_REJECT,
// KEYCTL_INVALIDATE, and KEYCTL_GET_PERSISTENT.
//sys KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) = SYS_KEYCTL
// KeyctlBuffer calls keyctl commands in which the third and fourth
// arguments are a buffer and its length, respectively.
// These commands are KEYCTL_UPDATE, KEYCTL_READ, and KEYCTL_INSTANTIATE.
//sys KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) = SYS_KEYCTL
// KeyctlString calls keyctl commands which return a string.
// These commands are KEYCTL_DESCRIBE and KEYCTL_GET_SECURITY.
// KeyctlString calls keyctl commands which return a string.
// These commands are KEYCTL_DESCRIBE and KEYCTL_GET_SECURITY.
func KeyctlString(cmd int, id int) (string, error) {
	// We must loop as the string data may change in between the syscalls.
	// We could allocate a large buffer here to reduce the chance that the
	// syscall needs to be called twice; however, this is unnecessary as
	// the performance loss is negligible.
	var buffer []byte
	for {
		// Try to fill the buffer with data; length is the total size of
		// the data, which may exceed len(buffer).
		length, err := KeyctlBuffer(cmd, id, buffer, 0)
		if err != nil {
			return "", err
		}

		if length == 0 {
			// No data; without this guard buffer[:length-1] below would
			// panic with a negative slice bound on the first iteration.
			return "", nil
		}

		// Check if the data was written
		if length <= len(buffer) {
			// Exclude the null terminator
			return string(buffer[:length-1]), nil
		}

		// Make a bigger buffer if needed
		buffer = make([]byte, length)
	}
}
// Keyctl commands with special signatures.
// KeyctlGetKeyringID implements the KEYCTL_GET_KEYRING_ID command.
// If create is true it is passed to the kernel as the value 1,
// otherwise 0.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_get_keyring_ID.3.html
func KeyctlGetKeyringID(id int, create bool) (ringid int, err error) {
	createInt := 0
	if create {
		createInt = 1
	}
	return KeyctlInt(KEYCTL_GET_KEYRING_ID, id, createInt, 0, 0)
}
// KeyctlSetperm implements the KEYCTL_SETPERM command. The perm value is the
// key handle permission mask as described in the "keyctl setperm" section of
// http://man7.org/linux/man-pages/man1/keyctl.1.html.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_setperm.3.html
func KeyctlSetperm(id int, perm uint32) error {
	// The permission mask is passed as the third keyctl argument;
	// only the error result is meaningful for this command.
	_, err := KeyctlInt(KEYCTL_SETPERM, id, int(perm), 0, 0)
	return err
}

//sys keyctlJoin(cmd int, arg2 string) (ret int, err error) = SYS_KEYCTL

// KeyctlJoinSessionKeyring implements the KEYCTL_JOIN_SESSION_KEYRING command.
// It returns the ID of the joined (or newly created) session keyring.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_join_session_keyring.3.html
func KeyctlJoinSessionKeyring(name string) (ringid int, err error) {
	return keyctlJoin(KEYCTL_JOIN_SESSION_KEYRING, name)
}
//sys keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) = SYS_KEYCTL
// KeyctlSearch implements the KEYCTL_SEARCH command.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_search.3.html
func KeyctlSearch(ringid int, keyType, description string, destRingid int) (id int, err error) {
return keyctlSearch(KEYCTL_SEARCH, ringid, keyType, description, destRingid)
}
//sys keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) = SYS_KEYCTL
// KeyctlInstantiateIOV implements the KEYCTL_INSTANTIATE_IOV command. This
// command is similar to KEYCTL_INSTANTIATE, except that the payload is a slice
// of Iovec (each of which represents a buffer) instead of a single buffer.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_instantiate_iov.3.html
func KeyctlInstantiateIOV(id int, payload []Iovec, ringid int) error {
return keyctlIOV(KEYCTL_INSTANTIATE_IOV, id, payload, ringid)
}
//sys keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) = SYS_KEYCTL
// KeyctlDHCompute implements the KEYCTL_DH_COMPUTE command. This command
// computes a Diffie-Hellman shared secret based on the provide params. The
// secret is written to the provided buffer and the returned size is the number
// of bytes written (returning an error if there is insufficient space in the
// buffer). If a nil buffer is passed in, this function returns the minimum
// buffer length needed to store the appropriate data. Note that this differs
// from KEYCTL_READ's behavior which always returns the requested payload size.
// See the full documentation at:
// http://man7.org/linux/man-pages/man3/keyctl_dh_compute.3.html
func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error) {
return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer)
}
// Recvmsg receives a message from fd, reading the payload into p and the
// ancillary (control) data into oob. It returns the number of payload
// bytes (n), the number of control bytes (oobn), the message flags, and
// the source address for unconnected sockets.
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
	var msg Msghdr
	var rsa RawSockaddrAny
	// Point the msghdr at rsa so the kernel can fill in the sender address.
	msg.Name = (*byte)(unsafe.Pointer(&rsa))
	msg.Namelen = uint32(SizeofSockaddrAny)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = &p[0]
		iov.SetLen(len(p))
	}
	var dummy byte
	if len(oob) > 0 {
		if len(p) == 0 {
			var sockType int
			sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
			if err != nil {
				return
			}
			// receive at least one normal byte: on non-datagram sockets a
			// zero-length payload read would not deliver the ancillary data
			// the caller asked for.
			if sockType != SOCK_DGRAM {
				iov.Base = &dummy
				iov.SetLen(1)
			}
		}
		msg.Control = &oob[0]
		msg.SetControllen(len(oob))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = recvmsg(fd, &msg, flags); err != nil {
		return
	}
	// Controllen/Flags are updated by the kernel to reflect what was read.
	oobn = int(msg.Controllen)
	recvflags = int(msg.Flags)
	// source address is only specified if the socket is unconnected
	if rsa.Addr.Family != AF_UNSPEC {
		from, err = anyToSockaddr(fd, &rsa)
	}
	return
}
// Sendmsg sends a message on fd, discarding the byte count that
// SendmsgN reports.
func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
	_, err = SendmsgN(fd, p, oob, to, flags)
	return err
}
// SendmsgN sends a message on fd with payload p, ancillary (control) data
// oob, and an optional destination address to. It returns the number of
// payload bytes sent.
func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
	var ptr unsafe.Pointer
	var salen _Socklen
	if to != nil {
		var err error
		ptr, salen, err = to.sockaddr()
		if err != nil {
			return 0, err
		}
	}
	var msg Msghdr
	msg.Name = (*byte)(ptr)
	msg.Namelen = uint32(salen)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = &p[0]
		iov.SetLen(len(p))
	}
	var dummy byte
	if len(oob) > 0 {
		if len(p) == 0 {
			var sockType int
			sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
			if err != nil {
				return 0, err
			}
			// send at least one normal byte: non-datagram sockets won't
			// carry a control-only message with an empty payload.
			if sockType != SOCK_DGRAM {
				iov.Base = &dummy
				iov.SetLen(1)
			}
		}
		msg.Control = &oob[0]
		msg.SetControllen(len(oob))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = sendmsg(fd, &msg, flags); err != nil {
		return 0, err
	}
	if len(oob) > 0 && len(p) == 0 {
		// don't report the dummy byte as payload
		n = 0
	}
	return n, nil
}
// BindToDevice binds the socket associated with fd to device.
// It is a thin wrapper around the SO_BINDTODEVICE socket option.
func BindToDevice(fd int, device string) (err error) {
	return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
}
//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)

// ptracePeek reads len(out) bytes of the tracee's memory starting at addr,
// one machine word per PTRACE_PEEK* request, and returns the number of
// bytes copied into out.
func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
	// The peek requests are machine-size oriented, so we wrap it
	// to retrieve arbitrary-length data.

	// The ptrace syscall differs from glibc's ptrace.
	// Peeks returns the word in *data, not as the return value.

	var buf [SizeofPtr]byte

	// Leading edge. PEEKTEXT/PEEKDATA don't require aligned
	// access (PEEKUSER warns that it might), but if we don't
	// align our reads, we might straddle an unmapped page
	// boundary and not get the bytes leading up to the page
	// boundary.
	n := 0
	if addr%SizeofPtr != 0 {
		// Read the aligned word containing addr, keep only the tail.
		err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return 0, err
		}
		n += copy(out, buf[addr%SizeofPtr:])
		out = out[n:]
	}

	// Remainder. addr+n is now word-aligned.
	for len(out) > 0 {
		// We use an internal buffer to guarantee alignment.
		// It's not documented if this is necessary, but we're paranoid.
		err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return n, err
		}
		copied := copy(out, buf[0:])
		n += copied
		out = out[copied:]
	}

	return n, nil
}
// PtracePeekText reads from the tracee's text (code) area at addr into out.
func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
}

// PtracePeekData reads from the tracee's data area at addr into out.
func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
}

// PtracePeekUser reads from the tracee's USER area at addr into out.
func PtracePeekUser(pid int, addr uintptr, out []byte) (count int, err error) {
	return ptracePeek(PTRACE_PEEKUSR, pid, addr, out)
}
// ptracePoke writes len(data) bytes to the tracee's memory at addr with
// word-sized PTRACE_POKE* requests, using PTRACE_PEEK* requests to
// read-modify-write the partial words at either edge. It returns the
// number of bytes written.
func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
	// As for ptracePeek, we need to align our accesses to deal
	// with the possibility of straddling an invalid page.

	// Leading edge: read the aligned word, overlay the new bytes,
	// write it back.
	n := 0
	if addr%SizeofPtr != 0 {
		var buf [SizeofPtr]byte
		err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return 0, err
		}
		n += copy(buf[addr%SizeofPtr:], data)
		word := *((*uintptr)(unsafe.Pointer(&buf[0])))
		err = ptrace(pokeReq, pid, addr-addr%SizeofPtr, word)
		if err != nil {
			return 0, err
		}
		data = data[n:]
	}

	// Interior: full aligned words, no read needed.
	for len(data) > SizeofPtr {
		word := *((*uintptr)(unsafe.Pointer(&data[0])))
		err = ptrace(pokeReq, pid, addr+uintptr(n), word)
		if err != nil {
			return n, err
		}
		n += SizeofPtr
		data = data[SizeofPtr:]
	}

	// Trailing edge: read-modify-write the final partial word.
	if len(data) > 0 {
		var buf [SizeofPtr]byte
		err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
		if err != nil {
			return n, err
		}
		copy(buf[0:], data)
		word := *((*uintptr)(unsafe.Pointer(&buf[0])))
		err = ptrace(pokeReq, pid, addr+uintptr(n), word)
		if err != nil {
			return n, err
		}
		n += len(data)
	}

	return n, nil
}
// PtracePokeText writes data to the tracee's text (code) area at addr.
func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
}

// PtracePokeData writes data to the tracee's data area at addr.
func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
}

// PtracePokeUser writes data to the tracee's USER area at addr.
func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) {
	return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data)
}

// PtraceGetRegs retrieves the tracee's register state into regsout.
func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
}

// PtraceSetRegs sets the tracee's registers from regs.
func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
}

// PtraceSetOptions sets the tracee's ptrace options (PTRACE_O_* bits).
func PtraceSetOptions(pid int, options int) (err error) {
	return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
}

// PtraceGetEventMsg retrieves the message associated with the tracee's
// most recent ptrace event.
func PtraceGetEventMsg(pid int) (msg uint, err error) {
	var data _C_long
	err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
	msg = uint(data)
	return
}

// PtraceCont resumes the stopped tracee, delivering signal (0 for none).
func PtraceCont(pid int, signal int) (err error) {
	return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
}

// PtraceSyscall resumes the tracee until the next syscall entry or exit.
func PtraceSyscall(pid int, signal int) (err error) {
	return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
}

// PtraceSingleStep executes a single instruction in the tracee.
func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }

// PtraceAttach attaches to, and stops, the process pid.
func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }

// PtraceDetach detaches from the traced process pid.
func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)

// Reboot performs reboot(2) with the given command, supplying the
// required magic numbers and an empty argument string.
func Reboot(cmd int) (err error) {
	return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
}

// ReadDirent reads directory entries from fd into buf; on Linux it is
// backed by getdents64 (see the Getdents //sys directive).
func ReadDirent(fd int, buf []byte) (n int, err error) {
	return Getdents(fd, buf)
}
//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)

// Mount attaches the filesystem of type fstype available at source to the
// directory target; data carries filesystem-specific options.
func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
	// Certain file systems get rather angry and EINVAL if you give
	// them an empty string of data, rather than NULL.
	if data == "" {
		return mount(source, target, fstype, flags, nil)
	}
	datap, err := BytePtrFromString(data)
	if err != nil {
		return err
	}
	return mount(source, target, fstype, flags, datap)
}
// Sendfile transfers up to count bytes from infd to outfd, starting at
// *offset when offset is non-nil, and returns the number of bytes written.
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
	if raceenabled {
		// Synchronize with other I/O for the race detector via ioSync.
		raceReleaseMerge(unsafe.Pointer(&ioSync))
	}
	return sendfile(outfd, infd, offset, count)
}
// Sendto
// Recvfrom
// Socketpair
/*
* Direct access
*/
//sys Acct(path string) (err error)
//sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error)
//sys Adjtimex(buf *Timex) (state int, err error)
//sys Chdir(path string) (err error)
//sys Chroot(path string) (err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys DeleteModule(name string, flags int) (err error)
//sys Dup(oldfd int) (fd int, err error)
//sys Dup3(oldfd int, newfd int, flags int) (err error)
//sysnb EpollCreate1(flag int) (fd int, err error)
//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2
//sys Exit(code int) = SYS_EXIT_GROUP
//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
//sys Fchdir(fd int) (err error)
//sys Fchmod(fd int, mode uint32) (err error)
//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys Fdatasync(fd int) (err error)
//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error)
//sys FinitModule(fd int, params string, flags int) (err error)
//sys Flistxattr(fd int, dest []byte) (sz int, err error)
//sys Flock(fd int, how int) (err error)
//sys Fremovexattr(fd int, attr string) (err error)
//sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error)
//sys Fsync(fd int) (err error)
//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
//sysnb Getpgid(pid int) (pgid int, err error)
// Getpgrp returns the process group id of the calling process.
func Getpgrp() (pid int) {
	// The error is deliberately discarded; Getpgrp has no error return.
	pgid, _ := Getpgid(0)
	return pgid
}
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sys Getrandom(buf []byte, flags int) (n int, err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
//sys InitModule(moduleImage []byte, params string) (err error)
//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
//sysnb InotifyInit1(flags int) (fd int, err error)
//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
//sysnb Kill(pid int, sig syscall.Signal) (err error)
//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
//sys Lgetxattr(path string, attr string, dest []byte) (sz int, err error)
//sys Listxattr(path string, dest []byte) (sz int, err error)
//sys Llistxattr(path string, dest []byte) (sz int, err error)
//sys Lremovexattr(path string, attr string) (err error)
//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error)
//sys MemfdCreate(name string, flags int) (fd int, err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
//sys read(fd int, p []byte) (n int, err error)
//sys Removexattr(path string, attr string) (err error)
//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
//sys RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error)
//sys Setdomainname(p []byte) (err error)
//sys Sethostname(p []byte) (err error)
//sysnb Setpgid(pid int, pgid int) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tv *Timeval) (err error)
//sys Setns(fd int, nstype int) (err error)
// issue 1435.
// On linux Setuid and Setgid only affects the current thread, not the process.
// This does not match what most callers expect so we must return an error
// here rather than letting the caller think that the call succeeded.

// Setuid is intentionally unsupported on Linux; it always returns
// EOPNOTSUPP (see the note above).
func Setuid(uid int) (err error) {
	return EOPNOTSUPP
}

// Setgid is intentionally unsupported on Linux; it always returns
// EOPNOTSUPP (see the note above).
func Setgid(gid int) (err error) {
	return EOPNOTSUPP
}
//sys Setpriority(which int, who int, prio int) (err error)
//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
//sys Signalfd(fd int, mask *Sigset_t, flags int) = SYS_SIGNALFD4
//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error)
//sys Sync()
//sys Syncfs(fd int) (err error)
//sysnb Sysinfo(info *Sysinfo_t) (err error)
//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error)
//sysnb Times(tms *Tms) (ticks uintptr, err error)
//sysnb Umask(mask int) (oldmask int)
//sysnb Uname(buf *Utsname) (err error)
//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
//sys Unshare(flags int) (err error)
//sys write(fd int, p []byte) (n int, err error)
//sys exitThread(code int) (err error) = SYS_EXIT
//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
// mmap varies by architecture; see syscall_linux_*.go.
//sys munmap(addr uintptr, length uintptr) (err error)
// mapper tracks mappings created by Mmap (keyed in its active map) so
// that Munmap, which only receives the byte slice, can look them up.
var mapper = &mmapper{
	active: make(map[*byte][]byte),
	mmap:   mmap,
	munmap: munmap,
}

// Mmap maps length bytes of fd starting at offset and returns the
// mapping as a byte slice.
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
	return mapper.Mmap(fd, offset, length, prot, flags)
}

// Munmap releases a mapping previously returned by Mmap.
func Munmap(b []byte) (err error) {
	return mapper.Munmap(b)
}
//sys Madvise(b []byte, advice int) (err error)
//sys Mprotect(b []byte, prot int) (err error)
//sys Mlock(b []byte) (err error)
//sys Mlockall(flags int) (err error)
//sys Msync(b []byte, flags int) (err error)
//sys Munlock(b []byte) (err error)
//sys Munlockall() (err error)
// Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd,
// using the specified flags. It returns the number of bytes transferred.
func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
	var p unsafe.Pointer
	if len(iovs) > 0 {
		p = unsafe.Pointer(&iovs[0])
	}

	// Raw invocation: vmsplice(fd, iov, nr_segs, flags).
	n, _, errno := Syscall6(SYS_VMSPLICE, uintptr(fd), uintptr(p), uintptr(len(iovs)), uintptr(flags), 0, 0)
	if errno != 0 {
		return 0, syscall.Errno(errno)
	}

	return int(n), nil
}
//sys faccessat(dirfd int, path string, mode uint32) (err error)

// Faccessat checks whether the calling process can access the file at
// path (relative to dirfd) with the given mode. The AT_SYMLINK_NOFOLLOW
// and AT_EACCESS flags are emulated in user space, glibc-style, because
// the kernel syscall takes no flags.
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
	if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
		return EINVAL
	}

	// The Linux kernel faccessat system call does not take any flags.
	// The glibc faccessat implements the flags itself; see
	// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/faccessat.c;hb=HEAD
	// Because people naturally expect syscall.Faccessat to act
	// like C faccessat, we do the same.

	if flags == 0 {
		return faccessat(dirfd, path, mode)
	}

	// Emulation path: stat the file and check the permission bits manually.
	var st Stat_t
	if err := Fstatat(dirfd, path, &st, flags&AT_SYMLINK_NOFOLLOW); err != nil {
		return err
	}

	mode &= 7
	if mode == 0 {
		// Existence-only check; Fstatat already succeeded.
		return nil
	}

	// AT_EACCESS: check with the effective ids instead of the real ones.
	var uid int
	if flags&AT_EACCESS != 0 {
		uid = Geteuid()
	} else {
		uid = Getuid()
	}

	if uid == 0 {
		if mode&1 == 0 {
			// Root can read and write any file.
			return nil
		}
		if st.Mode&0111 != 0 {
			// Root can execute any file that anybody can execute.
			return nil
		}
		return EACCES
	}

	// Pick the owner/group/other permission triplet that applies.
	// NOTE(review): supplementary group membership is not consulted here,
	// so a caller in a supplementary group may get EACCES where the
	// kernel's own check would succeed.
	var fmode uint32
	if uint32(uid) == st.Uid {
		fmode = (st.Mode >> 6) & 7
	} else {
		var gid int
		if flags&AT_EACCESS != 0 {
			gid = Getegid()
		} else {
			gid = Getgid()
		}

		if uint32(gid) == st.Gid {
			fmode = (st.Mode >> 3) & 7
		} else {
			fmode = st.Mode & 7
		}
	}

	if fmode&mode == mode {
		return nil
	}

	return EACCES
}
/*
* Unimplemented
*/
// AfsSyscall
// Alarm
// ArchPrctl
// Brk
// Capget
// Capset
// ClockNanosleep
// ClockSettime
// Clone
// EpollCtlOld
// EpollPwait
// EpollWaitOld
// Execve
// Fork
// Futex
// GetKernelSyms
// GetMempolicy
// GetRobustList
// GetThreadArea
// Getitimer
// Getpmsg
// IoCancel
// IoDestroy
// IoGetevents
// IoSetup
// IoSubmit
// IoprioGet
// IoprioSet
// KexecLoad
// LookupDcookie
// Mbind
// MigratePages
// Mincore
// ModifyLdt
// Mount
// MovePages
// MqGetsetattr
// MqNotify
// MqOpen
// MqTimedreceive
// MqTimedsend
// MqUnlink
// Mremap
// Msgctl
// Msgget
// Msgrcv
// Msgsnd
// Nfsservctl
// Personality
// Pselect6
// Ptrace
// Putpmsg
// Quotactl
// Readahead
// Readv
// RemapFilePages
// RestartSyscall
// RtSigaction
// RtSigpending
// RtSigprocmask
// RtSigqueueinfo
// RtSigreturn
// RtSigsuspend
// RtSigtimedwait
// SchedGetPriorityMax
// SchedGetPriorityMin
// SchedGetparam
// SchedGetscheduler
// SchedRrGetInterval
// SchedSetparam
// SchedYield
// Security
// Semctl
// Semget
// Semop
// Semtimedop
// SetMempolicy
// SetRobustList
// SetThreadArea
// SetTidAddress
// Shmat
// Shmctl
// Shmdt
// Shmget
// Sigaltstack
// Swapoff
// Swapon
// Sysfs
// TimerCreate
// TimerDelete
// TimerGetoverrun
// TimerGettime
// TimerSettime
// Timerfd
// Tkill (obsolete)
// Tuxcall
// Umount2
// Uselib
// Utimensat
// Vfork
// Vhangup
// Vserver
// Waitid
// _Sysctl
|
package uploader
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/dustin/go-humanize"
"github.com/go-errors/errors"
"github.com/itchio/go-itchio"
"github.com/itchio/wharf/counter"
"github.com/itchio/wharf/pwr"
"github.com/itchio/wharf/splitfunc"
"github.com/itchio/wharf/timeout"
)
// seed hands out a unique id to each ResumableUpload, used in debug logs.
var seed = 0
func fromEnv(envName string, defaultValue int) int {
v := os.Getenv(envName)
if v != "" {
iv, err := strconv.Atoi(v)
if err == nil {
log.Printf("Override set: %s = %d", envName, iv)
return iv
}
}
return defaultValue
}
// Upload tunables, overridable through environment variables for debugging.
var resumableMaxRetries = fromEnv("WHARF_MAX_RETRIES", 15)
var resumableConnectTimeout = time.Duration(fromEnv("WHARF_CONNECT_TIMEOUT", 30)) * time.Second
var resumableIdleTimeout = time.Duration(fromEnv("WHARF_IDLE_TIMEOUT", 60)) * time.Second
// ResumableUpload keeps track of an upload and reports back on its progress
type ResumableUpload struct {
	c *itchio.Client

	// TotalBytes counts bytes written into the upload pipeline.
	TotalBytes int64
	// UploadedBytes counts bytes handed to the HTTP request body.
	UploadedBytes int64
	// OnProgress, if set, is invoked whenever either counter changes.
	OnProgress func()

	// resumable URL as per GCS
	uploadURL string

	// where data is written so we can update counts
	writeCounter io.Writer

	// need to flush to squeeze all the data out
	bufferedWriter *bufio.Writer

	// need to close so reader end of pipe gets EOF
	pipeWriter io.Closer

	// id uniquely identifies this upload in debug logs.
	id       int
	consumer *pwr.StateConsumer
}
// Close flushes all intermediary buffers and closes the connection
func (ru *ResumableUpload) Close() error {
	ru.Debugf("flushing buffered writer, %d written", ru.TotalBytes)

	if err := ru.bufferedWriter.Flush(); err != nil {
		return errors.Wrap(err, 1)
	}

	ru.Debugf("closing pipe writer")

	if err := ru.pipeWriter.Close(); err != nil {
		return errors.Wrap(err, 1)
	}

	ru.Debugf("closed pipe writer")
	ru.Debugf("everything closed! uploadedbytes = %d, totalbytes = %d", ru.UploadedBytes, ru.TotalBytes)

	return nil
}
// Write is our implementation of io.Writer; it forwards p to the
// counting writer so progress counters stay current.
func (ru *ResumableUpload) Write(p []byte) (int, error) {
	n, err := ru.writeCounter.Write(p)
	return n, err
}
// NewResumableUpload builds a ResumableUpload that streams everything
// written to it through a large buffered pipe to a background goroutine,
// which performs the chunked PUTs against uploadURL. Completion is
// signalled on done, failures on errs.
func NewResumableUpload(uploadURL string, done chan bool, errs chan error, consumer *pwr.StateConsumer) (*ResumableUpload, error) {
	// TODO: make configurable?
	const bufferSize = 32 * 1024 * 1024

	pipeR, pipeW := io.Pipe()
	bw := bufio.NewWriterSize(pipeW, bufferSize)

	ru := &ResumableUpload{
		uploadURL:      uploadURL,
		id:             seed,
		consumer:       consumer,
		c:              itchio.ClientWithKey("x"),
		pipeWriter:     pipeW,
		bufferedWriter: bw,
	}
	seed++
	ru.c.HTTPClient = timeout.NewClient(resumableConnectTimeout, resumableIdleTimeout)

	ru.writeCounter = counter.NewWriterCallback(func(count int64) {
		ru.TotalBytes = count
		if ru.OnProgress != nil {
			ru.OnProgress()
		}
	}, bw)

	go ru.uploadChunks(pipeR, done, errs)

	return ru, nil
}
// Debugf logs a formatted debug message tagged with this upload's id.
func (ru *ResumableUpload) Debugf(f string, args ...interface{}) {
	msg := fmt.Sprintf(f, args...)
	ru.consumer.Debugf("[upload %d] %s", ru.id, msg)
}
// minChunkSize is the granularity chunks are scanned at (GCS resumable
// uploads work in 256KB units).
const minChunkSize = 256 * 1024 // 256KB
// maxChunkGroup caps how many chunks get coalesced into a single PUT.
const maxChunkGroup = 64
const maxSendBuf = maxChunkGroup * minChunkSize // 16MB

// blockItem is one chunk travelling from the scanner to the sender.
type blockItem struct {
	buf []byte
	// isLast marks the final chunk of the stream.
	isLast bool
}
type netError struct {
err error
}
func (ne *netError) Error() string {
return fmt.Sprintf("network error: %s", ne.err.Error())
}
// uploadChunks pumps data from reader to the GCS resumable upload URL.
// It re-chunks the stream into minChunkSize blocks (coalescing up to
// maxSendBuf bytes per PUT), retries transient failures with quadratic
// backoff, and reports completion on done or a fatal error on errs.
// It is launched in its own goroutine by NewResumableUpload.
func (ru *ResumableUpload) uploadChunks(reader io.Reader, done chan bool, errs chan error) {
	var offset int64 = 0

	sendBuf := make([]byte, 0, maxSendBuf)
	reqBlocks := make(chan blockItem, maxChunkGroup)
	canceller := make(chan bool)

	// doSendBytesOnce PUTs one chunk at the current offset. It returns a
	// *netError for failures worth retrying, a plain error otherwise, and
	// advances offset on success.
	doSendBytesOnce := func(buf []byte, isLast bool) error {
		// Measure the actual argument (previously len(sendBuf), which was
		// only correct because buf always aliased sendBuf).
		buflen := int64(len(buf))
		ru.Debugf("uploading chunk of %d bytes", buflen)

		body := bytes.NewReader(buf)
		countingReader := counter.NewReaderCallback(func(count int64) {
			ru.UploadedBytes = offset + count
			if ru.OnProgress != nil {
				ru.OnProgress()
			}
		}, body)

		req, err := http.NewRequest("PUT", ru.uploadURL, countingReader)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		start := offset
		end := start + buflen - 1
		contentRange := fmt.Sprintf("bytes %d-%d/*", offset, end)
		ru.Debugf("uploading %d-%d, last? %v", start, end, isLast)
		if isLast {
			// The final chunk reveals the total size of the upload.
			contentRange = fmt.Sprintf("bytes %d-%d/%d", offset, end, offset+buflen)
		}

		req.Header.Set("content-range", contentRange)

		res, err := ru.c.Do(req)
		if err != nil {
			ru.Debugf("while uploading %d-%d: \n%s", start, end, err.Error())
			return &netError{err}
		}
		// Close the body so the transport can reuse the connection
		// (it was never closed before).
		defer res.Body.Close()

		if res.StatusCode != 200 && res.StatusCode != 308 {
			ru.Debugf("uh oh, got HTTP %s", res.Status)
			resb, _ := ioutil.ReadAll(res.Body)
			ru.Debugf("server said %s", string(resb))

			// Build a real error before wrapping: err is nil at this point,
			// and wrapping nil in netError made netError.Error() panic
			// when the retry loop formatted it.
			err = fmt.Errorf("HTTP %d while uploading", res.StatusCode)

			// retry requests that return these, see full list
			// at https://cloud.google.com/storage/docs/xml-api/resumable-upload
			// see also https://github.com/itchio/butler/issues/71
			if res.StatusCode == 408 /* Request Timeout */ ||
				res.StatusCode == 500 /* Internal Server Error */ ||
				res.StatusCode == 502 /* Bad Gateway */ ||
				res.StatusCode == 503 /* Service Unavailable */ ||
				res.StatusCode == 504 /* Gateway Timeout */ {
				return &netError{err}
			}

			return err
		}

		offset += buflen
		ru.Debugf("%s uploaded, at %s", humanize.Bytes(uint64(offset)), res.Status)
		return nil
	}

	// doSendBytes retries doSendBytesOnce on *netError, sleeping tries²
	// seconds between attempts; any other error is fatal.
	doSendBytes := func(buf []byte, isLast bool) error {
		tries := 1
		for tries < resumableMaxRetries {
			err := doSendBytesOnce(buf, isLast)
			if err != nil {
				if ne, ok := err.(*netError); ok {
					delay := tries * tries
					ru.consumer.PauseProgress()
					ru.consumer.Infof("")
					ru.consumer.Infof("%s", ne.Error())
					ru.consumer.Infof("Sleeping %d seconds then retrying", delay)
					time.Sleep(time.Second * time.Duration(delay))
					ru.consumer.ResumeProgress()
					tries++
					continue
				} else {
					return errors.Wrap(err, 1)
				}
			} else {
				return nil
			}
		}

		return fmt.Errorf("Too many network errors, giving up.")
	}

	s := bufio.NewScanner(reader)
	s.Buffer(make([]byte, minChunkSize), 0)
	s.Split(splitfunc.New(minChunkSize))

	// we need two buffers to know when we're at EOF,
	// for sizes that are an exact multiple of minChunkSize
	buf1 := make([]byte, 0, minChunkSize)
	buf2 := make([]byte, 0, minChunkSize)

	subDone := make(chan bool)
	subErrs := make(chan error)

	ru.Debugf("kicking off sender")

	// Sender: coalesces blocks from reqBlocks into sendBuf and uploads.
	go func() {
		isLast := false

		for !isLast {
			sendBuf = sendBuf[:0]

			for len(sendBuf) < maxSendBuf && !isLast {
				var item blockItem
				if len(sendBuf) == 0 {
					ru.Debugf("sender blocking receive")
					select {
					case item = <-reqBlocks:
						// cool
					case <-canceller:
						ru.Debugf("send cancelled")
						return
					}
				} else {
					ru.Debugf("sender non-blocking receive")
					select {
					case item = <-reqBlocks:
						// cool
					case <-canceller:
						ru.Debugf("send cancelled")
						return
					default:
						// NOTE(review): this break only exits the select,
						// not the inner for loop; the zero-value item is
						// then appended (a no-op) and we poll again.
						ru.Debugf("sent faster than scanned, uploading smaller chunk")
						break
					}
				}

				if item.isLast {
					isLast = true
				}
				sendBuf = append(sendBuf, item.buf...)
			}

			if len(sendBuf) > 0 {
				err := doSendBytes(sendBuf, isLast)
				if err != nil {
					ru.Debugf("send error, bailing out")
					subErrs <- errors.Wrap(err, 1)
					return
				}
			}
		}

		subDone <- true
		ru.Debugf("sender done")
	}()

	scannedBufs := make(chan []byte)
	usedBufs := make(chan bool)

	// Scanner pump: hands each scanned chunk over and waits until it has
	// been copied before calling Scan again (Scan reuses its buffer).
	go func() {
		for s.Scan() {
			select {
			case scannedBufs <- s.Bytes():
				// woo
			case <-canceller:
				// NOTE(review): break only leaves the select here; the
				// scan loop keeps running until EOF.
				ru.Debugf("scan cancelled (1)")
				break
			}
			select {
			case <-usedBufs:
				// woo
			case <-canceller:
				ru.Debugf("scan cancelled (2)")
				break
			}
		}
		close(scannedBufs)
	}()

	// break patch into chunks of minChunkSize, signal last block
	go func() {
		for scannedBuf := range scannedBufs {
			buf2 = append(buf2[:0], buf1...)
			buf1 = append(buf1[:0], scannedBuf...)
			usedBufs <- true

			// all but first iteration
			if len(buf2) > 0 {
				select {
				case reqBlocks <- blockItem{buf: append([]byte{}, buf2...), isLast: false}:
					// okay cool let's go c'mon
				case <-canceller:
					ru.Debugf("scan cancelled (3)")
					return
				}
			}
		}

		err := s.Err()
		if err != nil {
			ru.Debugf("scanner error :(")
			subErrs <- errors.Wrap(err, 1)
			return
		}

		select {
		case reqBlocks <- blockItem{buf: append([]byte{}, buf1...), isLast: true}:
		case <-canceller:
			ru.Debugf("scan cancelled (right near the finish line)")
			return
		}

		subDone <- true
		ru.Debugf("scanner done")
	}()

	// Wait for both the sender and the chunker; the first error cancels
	// everything else.
	for i := 0; i < 2; i++ {
		select {
		case <-subDone:
			// woo!
		case err := <-subErrs:
			ru.Debugf("got sub error: %s, bailing", err.Error())
			close(canceller)
			errs <- errors.Wrap(err, 1)
			return
		}
	}

	done <- true
	ru.Debugf("done sent!")
}
Pass a non-nil error to netError, so that retrying a failed HTTP status no longer dereferences a nil error.
package uploader
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/dustin/go-humanize"
"github.com/go-errors/errors"
"github.com/itchio/go-itchio"
"github.com/itchio/wharf/counter"
"github.com/itchio/wharf/pwr"
"github.com/itchio/wharf/splitfunc"
"github.com/itchio/wharf/timeout"
)
// seed hands out a unique id to each ResumableUpload, used in debug logs.
var seed = 0
func fromEnv(envName string, defaultValue int) int {
v := os.Getenv(envName)
if v != "" {
iv, err := strconv.Atoi(v)
if err == nil {
log.Printf("Override set: %s = %d", envName, iv)
return iv
}
}
return defaultValue
}
// Upload tunables, overridable through environment variables for debugging.
var resumableMaxRetries = fromEnv("WHARF_MAX_RETRIES", 15)
var resumableConnectTimeout = time.Duration(fromEnv("WHARF_CONNECT_TIMEOUT", 30)) * time.Second
var resumableIdleTimeout = time.Duration(fromEnv("WHARF_IDLE_TIMEOUT", 60)) * time.Second
// ResumableUpload keeps track of an upload and reports back on its progress
type ResumableUpload struct {
	c *itchio.Client

	// TotalBytes counts bytes written into the upload pipeline.
	TotalBytes int64
	// UploadedBytes counts bytes handed to the HTTP request body.
	UploadedBytes int64
	// OnProgress, if set, is invoked whenever either counter changes.
	OnProgress func()

	// resumable URL as per GCS
	uploadURL string

	// where data is written so we can update counts
	writeCounter io.Writer

	// need to flush to squeeze all the data out
	bufferedWriter *bufio.Writer

	// need to close so reader end of pipe gets EOF
	pipeWriter io.Closer

	// id uniquely identifies this upload in debug logs.
	id       int
	consumer *pwr.StateConsumer
}
// Close flushes all intermediary buffers and closes the connection
func (ru *ResumableUpload) Close() error {
	ru.Debugf("flushing buffered writer, %d written", ru.TotalBytes)

	if err := ru.bufferedWriter.Flush(); err != nil {
		return errors.Wrap(err, 1)
	}

	ru.Debugf("closing pipe writer")

	if err := ru.pipeWriter.Close(); err != nil {
		return errors.Wrap(err, 1)
	}

	ru.Debugf("closed pipe writer")
	ru.Debugf("everything closed! uploadedbytes = %d, totalbytes = %d", ru.UploadedBytes, ru.TotalBytes)

	return nil
}
// Write is our implementation of io.Writer; it forwards p to the
// counting writer so progress counters stay current.
func (ru *ResumableUpload) Write(p []byte) (int, error) {
	n, err := ru.writeCounter.Write(p)
	return n, err
}
// NewResumableUpload builds a ResumableUpload that streams everything
// written to it through a large buffered pipe to a background goroutine,
// which performs the chunked PUTs against uploadURL. Completion is
// signalled on done, failures on errs.
func NewResumableUpload(uploadURL string, done chan bool, errs chan error, consumer *pwr.StateConsumer) (*ResumableUpload, error) {
	// TODO: make configurable?
	const bufferSize = 32 * 1024 * 1024

	pipeR, pipeW := io.Pipe()
	bw := bufio.NewWriterSize(pipeW, bufferSize)

	ru := &ResumableUpload{
		uploadURL:      uploadURL,
		id:             seed,
		consumer:       consumer,
		c:              itchio.ClientWithKey("x"),
		pipeWriter:     pipeW,
		bufferedWriter: bw,
	}
	seed++
	ru.c.HTTPClient = timeout.NewClient(resumableConnectTimeout, resumableIdleTimeout)

	ru.writeCounter = counter.NewWriterCallback(func(count int64) {
		ru.TotalBytes = count
		if ru.OnProgress != nil {
			ru.OnProgress()
		}
	}, bw)

	go ru.uploadChunks(pipeR, done, errs)

	return ru, nil
}
// Debugf logs a formatted debug message tagged with this upload's id.
func (ru *ResumableUpload) Debugf(f string, args ...interface{}) {
	msg := fmt.Sprintf(f, args...)
	ru.consumer.Debugf("[upload %d] %s", ru.id, msg)
}
// minChunkSize is the granularity chunks are scanned at (GCS resumable
// uploads work in 256KB units).
const minChunkSize = 256 * 1024 // 256KB
// maxChunkGroup caps how many chunks get coalesced into a single PUT.
const maxChunkGroup = 64
const maxSendBuf = maxChunkGroup * minChunkSize // 16MB

// blockItem is one chunk travelling from the scanner to the sender.
type blockItem struct {
	buf []byte
	// isLast marks the final chunk of the stream.
	isLast bool
}
type netError struct {
err error
}
func (ne *netError) Error() string {
return fmt.Sprintf("network error: %s", ne.err.Error())
}
func (ru *ResumableUpload) uploadChunks(reader io.Reader, done chan bool, errs chan error) {
var offset int64 = 0
sendBuf := make([]byte, 0, maxSendBuf)
reqBlocks := make(chan blockItem, maxChunkGroup)
canceller := make(chan bool)
doSendBytesOnce := func(buf []byte, isLast bool) error {
buflen := int64(len(sendBuf))
ru.Debugf("uploading chunk of %d bytes", buflen)
body := bytes.NewReader(buf)
countingReader := counter.NewReaderCallback(func(count int64) {
ru.UploadedBytes = offset + count
if ru.OnProgress != nil {
ru.OnProgress()
}
}, body)
req, err := http.NewRequest("PUT", ru.uploadURL, countingReader)
if err != nil {
return errors.Wrap(err, 1)
}
start := offset
end := start + buflen - 1
contentRange := fmt.Sprintf("bytes %d-%d/*", offset, end)
ru.Debugf("uploading %d-%d, last? %v", start, end, isLast)
if isLast {
contentRange = fmt.Sprintf("bytes %d-%d/%d", offset, end, offset+buflen)
}
req.Header.Set("content-range", contentRange)
res, err := ru.c.Do(req)
if err != nil {
ru.Debugf("while uploading %d-%d: \n%s", start, end, err.Error())
return &netError{err}
}
if res.StatusCode != 200 && res.StatusCode != 308 {
ru.Debugf("uh oh, got HTTP %s", res.Status)
resb, _ := ioutil.ReadAll(res.Body)
ru.Debugf("server said %s", string(resb))
err = fmt.Errorf("HTTP %d while uploading", res.StatusCode)
// retry requests that return these, see full list
// at https://cloud.google.com/storage/docs/xml-api/resumable-upload
// see also https://github.com/itchio/butler/issues/71
if res.StatusCode == 408 /* Request Timeout */ ||
res.StatusCode == 500 /* Internal Server Error */ ||
res.StatusCode == 502 /* Bad Gateway */ ||
res.StatusCode == 503 /* Service Unavailable */ ||
res.StatusCode == 504 /* Gateway Timeout */ {
return &netError{err}
}
return err
}
offset += buflen
ru.Debugf("%s uploaded, at %s", humanize.Bytes(uint64(offset)), res.Status)
return nil
}
doSendBytes := func(buf []byte, isLast bool) error {
tries := 1
for tries < resumableMaxRetries {
err := doSendBytesOnce(buf, isLast)
if err != nil {
if ne, ok := err.(*netError); ok {
delay := tries * tries
ru.consumer.PauseProgress()
ru.consumer.Infof("")
ru.consumer.Infof("%s", ne.Error())
ru.consumer.Infof("Sleeping %d seconds then retrying", delay)
time.Sleep(time.Second * time.Duration(delay))
ru.consumer.ResumeProgress()
tries++
continue
} else {
return errors.Wrap(err, 1)
}
} else {
return nil
}
}
return fmt.Errorf("Too many network errors, giving up.")
}
s := bufio.NewScanner(reader)
s.Buffer(make([]byte, minChunkSize), 0)
s.Split(splitfunc.New(minChunkSize))
// we need two buffers to know when we're at EOF,
// for sizes that are an exact multiple of minChunkSize
buf1 := make([]byte, 0, minChunkSize)
buf2 := make([]byte, 0, minChunkSize)
subDone := make(chan bool)
subErrs := make(chan error)
ru.Debugf("kicking off sender")
go func() {
isLast := false
for !isLast {
sendBuf = sendBuf[:0]
for len(sendBuf) < maxSendBuf && !isLast {
var item blockItem
if len(sendBuf) == 0 {
ru.Debugf("sender blocking receive")
select {
case item = <-reqBlocks:
// cool
case <-canceller:
ru.Debugf("send cancelled")
return
}
} else {
ru.Debugf("sender non-blocking receive")
select {
case item = <-reqBlocks:
// cool
case <-canceller:
ru.Debugf("send cancelled")
return
default:
ru.Debugf("sent faster than scanned, uploading smaller chunk")
break
}
}
if item.isLast {
isLast = true
}
sendBuf = append(sendBuf, item.buf...)
}
if len(sendBuf) > 0 {
err := doSendBytes(sendBuf, isLast)
if err != nil {
ru.Debugf("send error, bailing out")
subErrs <- errors.Wrap(err, 1)
return
}
}
}
subDone <- true
ru.Debugf("sender done")
}()
scannedBufs := make(chan []byte)
usedBufs := make(chan bool)
go func() {
for s.Scan() {
select {
case scannedBufs <- s.Bytes():
// woo
case <-canceller:
ru.Debugf("scan cancelled (1)")
break
}
select {
case <-usedBufs:
// woo
case <-canceller:
ru.Debugf("scan cancelled (2)")
break
}
}
close(scannedBufs)
}()
// break patch into chunks of minChunkSize, signal last block
go func() {
for scannedBuf := range scannedBufs {
buf2 = append(buf2[:0], buf1...)
buf1 = append(buf1[:0], scannedBuf...)
usedBufs <- true
// all but first iteration
if len(buf2) > 0 {
select {
case reqBlocks <- blockItem{buf: append([]byte{}, buf2...), isLast: false}:
// okay cool let's go c'mon
case <-canceller:
ru.Debugf("scan cancelled (3)")
return
}
}
}
err := s.Err()
if err != nil {
ru.Debugf("scanner error :(")
subErrs <- errors.Wrap(err, 1)
return
}
select {
case reqBlocks <- blockItem{buf: append([]byte{}, buf1...), isLast: true}:
case <-canceller:
ru.Debugf("scan cancelled (right near the finish line)")
return
}
subDone <- true
ru.Debugf("scanner done")
}()
for i := 0; i < 2; i++ {
select {
case <-subDone:
// woo!
case err := <-subErrs:
ru.Debugf("got sub error: %s, bailing", err.Error())
close(canceller)
errs <- errors.Wrap(err, 1)
return
}
}
done <- true
ru.Debugf("done sent!")
}
|
package git
import (
"fmt"
"strings"
"time"
)
// CloneOpts holds the optional settings for the git clone command.
type CloneOpts struct {
	Depth                   int    // --depth N shallow clone; skipped when CheckoutCommit is set
	SingleBranch            bool   // --single-branch (only applied when Branch is empty)
	Branch                  string // --branch <name>; takes precedence over SingleBranch
	Recursive               bool   // --recursive (clone submodules)
	Verbose                 bool   // --verbose
	Quiet                   bool   // --quiet; wins over Verbose when both are set
	CheckoutCommit          string // after clone, run `git reset --hard <commit>` in the clone dir
	NoStrictHostKeyChecking bool   // presumably disables SSH strict host-key checking — not used by the clone helpers in this file; verify against the ssh setup
}
// Clone performs a git clone of repo into path, applying the given
// authentication, clone options and output redirection. It returns any
// error from resolving the repository URL or running the git commands.
func Clone(repo string, path string, auth *AuthOpts, opts *CloneOpts, output *OutputOpts) error {
	if verbose {
		t1 := time.Now()
		if opts != nil && opts.CheckoutCommit != "" {
			defer LogFunc("Checkout commit %s", opts.CheckoutCommit)
		}
		// BUG FIX: arguments to a deferred call are evaluated at the defer
		// statement, so time.Since(t1) was computed immediately and the
		// logged duration was always ~0s. Wrapping in a closure defers the
		// duration computation until the function actually returns.
		defer func() {
			LogFunc("Git clone %s (%v s)", path, int(time.Since(t1).Seconds()))
		}()
	}
	var commands []cmd
	repoURL, err := getRepoURL(repo, auth)
	if err != nil {
		return err
	}
	commands = prepareGitCloneCommands(repoURL, path, opts)
	return runGitCommands(repo, commands, auth, output)
}
func prepareGitCloneCommands(repo string, path string, opts *CloneOpts) cmds {
allCmd := []cmd{}
gitcmd := cmd{
cmd: "git",
args: []string{"clone"},
}
if opts != nil {
if opts.Quiet {
gitcmd.args = append(gitcmd.args, "--quiet")
} else if opts.Verbose {
gitcmd.args = append(gitcmd.args, "--verbose")
}
if opts.CheckoutCommit == "" {
if opts.Depth != 0 {
gitcmd.args = append(gitcmd.args, "--depth", fmt.Sprintf("%d", opts.Depth))
}
}
if opts.Branch != "" {
gitcmd.args = append(gitcmd.args, "--branch", opts.Branch)
} else if opts.SingleBranch {
gitcmd.args = append(gitcmd.args, "--single-branch")
}
if opts.Recursive {
gitcmd.args = append(gitcmd.args, "--recursive")
}
}
gitcmd.args = append(gitcmd.args, repo)
if path != "" {
gitcmd.args = append(gitcmd.args, path)
}
allCmd = append(allCmd, gitcmd)
if opts != nil && opts.CheckoutCommit != "" {
resetCmd := cmd{
cmd: "git",
args: []string{"reset", "--hard", opts.CheckoutCommit},
}
//Locate the git reset cmd to the right directory
if path == "" {
t := strings.Split(repo, "/")
resetCmd.dir = strings.TrimSuffix(t[len(t)-1], ".git")
} else {
resetCmd.dir = path
}
allCmd = append(allCmd, resetCmd)
}
return cmds(allCmd)
}
fix(sdk): add no-single-branch for git clone with depth (#3162)
package git
import (
"fmt"
"strings"
"time"
)
// CloneOpts is a optional structs for git clone command
type CloneOpts struct {
Depth int
SingleBranch bool
Branch string
Recursive bool
Verbose bool
Quiet bool
CheckoutCommit string
NoStrictHostKeyChecking bool
}
// Clone performs a git clone of repo into path, applying the given
// authentication, clone options and output redirection. It returns any
// error from resolving the repository URL or running the git commands.
func Clone(repo string, path string, auth *AuthOpts, opts *CloneOpts, output *OutputOpts) error {
	if verbose {
		t1 := time.Now()
		if opts != nil && opts.CheckoutCommit != "" {
			defer LogFunc("Checkout commit %s", opts.CheckoutCommit)
		}
		// BUG FIX: arguments to a deferred call are evaluated at the defer
		// statement, so time.Since(t1) was computed immediately and the
		// logged duration was always ~0s. Wrapping in a closure defers the
		// duration computation until the function actually returns.
		defer func() {
			LogFunc("Git clone %s (%v s)", path, int(time.Since(t1).Seconds()))
		}()
	}
	var commands []cmd
	repoURL, err := getRepoURL(repo, auth)
	if err != nil {
		return err
	}
	commands = prepareGitCloneCommands(repoURL, path, opts)
	return runGitCommands(repo, commands, auth, output)
}
// prepareGitCloneCommands builds the command sequence for a clone: the
// `git clone` invocation itself and, when opts.CheckoutCommit is set, a
// follow-up `git reset --hard` executed inside the cloned directory.
func prepareGitCloneCommands(repo string, path string, opts *CloneOpts) cmds {
	allCmd := []cmd{}
	gitcmd := cmd{
		cmd:  "git",
		args: []string{"clone"},
	}
	if opts != nil {
		// Quiet wins over Verbose when both are set.
		if opts.Quiet {
			gitcmd.args = append(gitcmd.args, "--quiet")
		} else if opts.Verbose {
			gitcmd.args = append(gitcmd.args, "--verbose")
		}
		// --depth is only honoured when no specific commit will be checked
		// out afterwards: a shallow clone might not contain that commit.
		if opts.CheckoutCommit == "" {
			if opts.Depth != 0 {
				gitcmd.args = append(gitcmd.args, "--depth", fmt.Sprintf("%d", opts.Depth))
			}
		}
		// An explicit branch takes precedence over --single-branch.
		if opts.Branch != "" {
			gitcmd.args = append(gitcmd.args, "--branch", opts.Branch)
		} else if opts.SingleBranch {
			gitcmd.args = append(gitcmd.args, "--single-branch")
		}
		// git makes --depth imply --single-branch; pass --no-single-branch
		// so a shallow clone still fetches all branches unless the caller
		// explicitly asked for a single branch.
		if !opts.SingleBranch && opts.Depth != 0 {
			gitcmd.args = append(gitcmd.args, "--no-single-branch")
		}
		if opts.Recursive {
			gitcmd.args = append(gitcmd.args, "--recursive")
		}
	}
	gitcmd.args = append(gitcmd.args, repo)
	if path != "" {
		gitcmd.args = append(gitcmd.args, path)
	}
	allCmd = append(allCmd, gitcmd)
	if opts != nil && opts.CheckoutCommit != "" {
		resetCmd := cmd{
			cmd:  "git",
			args: []string{"reset", "--hard", opts.CheckoutCommit},
		}
		//Locate the git reset cmd to the right directory: either the
		//explicit path, or the directory name git derives from the repo
		//URL (last path segment with any ".git" suffix removed).
		if path == "" {
			t := strings.Split(repo, "/")
			resetCmd.dir = strings.TrimSuffix(t[len(t)-1], ".git")
		} else {
			resetCmd.dir = path
		}
		allCmd = append(allCmd, resetCmd)
	}
	return cmds(allCmd)
}
|
package auth
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"sync"
jwt "github.com/dgrijalva/jwt-go"
uuid "github.com/nu7hatch/gouuid"
log "github.com/sirupsen/logrus"
)
const (
sessionTokenName = "kuill"
claimExpires = "exp"
claimNotBefore = "nbf"
claimCSRFToken = "csrf"
claimUserID = "uid"
claimGroups = "grp"
)
// Context is a holder for the currently authenticated user's information
type Context interface {
User() string
Groups() []string
}
// Authenticator is a pluggable interface for authentication providers
type Authenticator interface {
// Name returns the url-safe unique name of this authenticator
Name() string
// Description returns the user-friendly description of this authenticator
Description() string
// Type returns the type of this authenticator
Type() string
// GetHandlers returns the handlers for this authenticator; the set of
// handlers must include a "login" handler which will be triggered
// GetHandlers() map[string]http.HandlerFunc
// LoginURL returns the initial login URL for this handler
Authenticate(w http.ResponseWriter, r *http.Request) (*SessionToken, error)
LoginURL() string
// IconURL returns an icon URL to signify this login method; empty string implies a default can be used
IconURL() string
// PostWithCredentials returns true if this authenticator expects username/password credentials be POST'd
PostWithCredentials() bool
}
// Delegate is a function which requires user and group information along with
// the standard http request/response parameters
type Delegate func(w http.ResponseWriter, r *http.Request, authentication Context)
// NewAuthDelegate returns a new http.Handler func which delegates to the
// provided AuthDelegate, passing the resolved user,group information form
// the session where found, otherwise returning 401 Unauthorized.
func (m *Manager) NewAuthDelegate(delegate Delegate) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
session, err := m.ParseSessionToken(r)
if session == nil {
if err != nil {
log.Warnf("Authentication error: %v", err)
} else {
log.Infof("No existing/valid session")
}
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
m.keepSessionAlive(session, w)
delegate(w, r, session)
}
}
// Manager manages supported authentication mechanisms
type Manager struct {
authenticators map[string]Authenticator
loginMethodsResponse []byte
mutex sync.Mutex
hmac []byte
}
// NewAuthManager creates a new authentication manager instance
func NewAuthManager() (*Manager, error) {
hmc, _ := uuid.NewV4()
m := &Manager{
authenticators: make(map[string]Authenticator),
hmac: []byte(hmc.String()),
}
m.loginMethodsResponse, _ = m.buildLoginMethodsResponse()
http.HandleFunc("/auth/login_methods", m.listLoginMethods)
http.HandleFunc("/auth/user_info", m.userInfo)
return m, nil
}
func (m *Manager) buildLoginMethodsResponse() ([]byte, error) {
loginMethods := []map[string]interface{}{}
for id, authN := range m.authenticators {
loginMethods = append(loginMethods, map[string]interface{}{
"id": id,
"name": authN.Name(),
"desc": authN.Description(),
"type": authN.Type(),
"url": authN.LoginURL(),
"icon": authN.IconURL(),
"post_creds": authN.PostWithCredentials(),
})
}
resp := map[string]interface{}{
"login_methods": loginMethods,
}
return json.Marshal(&resp)
}
// RegisterAuthenticator registers an authentication provider
func (m *Manager) RegisterAuthenticator(authn Authenticator) error {
key := fmt.Sprintf("%s:%s", authn.Type(), authn.Name())
m.mutex.Lock()
defer m.mutex.Unlock()
if _, ok := m.authenticators[key]; ok {
return fmt.Errorf("An authenticator already exists with key '%s'", key)
}
m.authenticators[key] = authn
var handlerFunc http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
sessionToken, err := authn.Authenticate(w, r)
if err != nil {
log.Error(err)
} else if sessionToken != nil {
m.completeAuthentication(sessionToken, w, r)
}
}
http.HandleFunc(authn.LoginURL(), handlerFunc)
var err error
m.loginMethodsResponse, err = m.buildLoginMethodsResponse()
if err != nil {
return fmt.Errorf("Problem marshalling cached authenticators response: %v", err)
}
log.Infof("Enabled authenticator: %s => %s", key, authn.LoginURL())
return nil
}
// listLoginMethods writes the cached JSON description of all registered
// login methods. Only GET is allowed.
//
// NOTE(review): m.loginMethodsResponse is rebuilt under m.mutex in
// RegisterAuthenticator but read here without locking — confirm that all
// registrations happen before the server starts serving requests.
func (m *Manager) listLoginMethods(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// BUG FIX: the missing return meant the response body was still
		// written (with a 405 status) for non-GET requests.
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(m.loginMethodsResponse)))
	w.Write(m.loginMethodsResponse)
}
type userInfo struct {
User string `json:"user,omitempty"`
SessionExpires string `json:"session_expires,omitempty"`
}
// userInfo handles GET /auth/user_info: it resolves the session from the
// request cookie, renews it if needed, and responds with the user details.
func (m *Manager) userInfo(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// BUG FIX: the missing return meant the handler continued and
		// wrote a second response body after the 405.
		return
	}
	// ParseSessionToken's error is deliberately ignored: a nil session is
	// handled by respondWithUserInfo (empty payload).
	session, _ := m.ParseSessionToken(r)
	m.keepSessionAlive(session, w)
	m.respondWithUserInfo(session, w)
}
func (m *Manager) respondWithUserInfo(session *SessionToken, w http.ResponseWriter) {
var resp userInfo
if session != nil {
resp.User = session.User()
resp.SessionExpires = time.Unix(session.Expires(), 0).Format(time.RFC3339)
}
data, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Failed marshalling user response", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
w.Write(data)
}
// keepSessionAlive renews the session cookie when the session is valid but
// due to expire within the next minute.
func (m *Manager) keepSessionAlive(session *SessionToken, w http.ResponseWriter) {
	if session == nil || !session.Valid {
		return
	}
	// BUG FIX: renew when the token expires in less than 1 minute. The
	// original comparison used now-60s, which only matched tokens already
	// expired for over a minute and so never renewed a live session.
	if session.Expires() >= time.Now().Unix()+int64(time.Minute.Seconds()) {
		return
	}
	oldExpires := session.Expires()
	// BUG FIX: carry over the existing claims but drop the timing claims;
	// previously the stale "exp"/"nbf" in session.claims overrode the
	// fresh ones set by NewSessionToken, making the renewal a no-op.
	extra := make(map[string]interface{}, len(session.claims))
	for k, v := range session.claims {
		if k != claimExpires && k != claimNotBefore {
			extra[k] = v
		}
	}
	session = NewSessionToken(session.User(), session.Groups(), extra)
	m.writeSessionCookie(session, w)
	if log.GetLevel() >= log.DebugLevel {
		// BUG FIX: the original Debugf had three %-verbs but only two
		// arguments, producing a %!d(MISSING) in the log line.
		log.Debugf("Renewed session for %s: expires %d -> %d", session.User(), oldExpires, session.Expires())
	}
}
func (m *Manager) writeSessionCookie(session *SessionToken, w http.ResponseWriter) {
signedJWT, err := session.SignedString(m.hmac)
if err != nil {
http.Error(w, "Failed to sign JWT: "+err.Error(), http.StatusInternalServerError)
return
}
http.SetCookie(w, &http.Cookie{
Name: sessionTokenName,
Value: signedJWT,
HttpOnly: true,
Path: "/",
})
}
// TODO: this should be re-worked so that it wraps authenticators' authenticate methods
func (m *Manager) completeAuthentication(session *SessionToken, w http.ResponseWriter, r *http.Request) {
m.writeSessionCookie(session, w)
targetURL := r.URL.Query().Get("target")
if len(targetURL) > 0 {
http.Redirect(w, r, targetURL, http.StatusFound)
} else {
m.respondWithUserInfo(session, w)
}
}
// SessionToken is a wrapper around JWT with methods for easy user/group access
type SessionToken struct {
*jwt.Token
claims jwt.MapClaims
}
// User recovers the userID from a session token
func (s *SessionToken) User() string {
return s.claims[claimUserID].(string)
}
// Expires returns the expiration of the token in Unix time, the number of seconds elapsed
// since January 1, 1970 UTC.
func (s *SessionToken) Expires() int64 {
expires := s.claims[claimExpires]
if intVal, ok := expires.(int64); ok {
return intVal
}
return int64(expires.(float64))
}
// Groups recovers the groups from a session token
func (s *SessionToken) Groups() []string {
return strings.Split(s.claims[claimGroups].(string), ",")
}
// NewSessionToken generates a new auth token suitable for storing user session state
func NewSessionToken(user string, groups []string, additionalClaims map[string]interface{}) *SessionToken {
csrfToken, _ := uuid.NewV4()
claims := jwt.MapClaims{
claimNotBefore: time.Now().Unix(),
claimExpires: time.Now().Add(time.Minute * 15).Unix(),
claimCSRFToken: csrfToken.String(),
claimUserID: user,
claimGroups: strings.Join(groups, ","),
}
if additionalClaims != nil {
for k, v := range additionalClaims {
claims[k] = v
}
}
return &SessionToken{Token: jwt.NewWithClaims(jwt.SigningMethodHS256, claims), claims: claims}
}
// ParseSessionToken recovers the session
func (m *Manager) ParseSessionToken(r *http.Request) (*SessionToken, error) {
cookie, err := r.Cookie(sessionTokenName)
if cookie == nil || err != nil {
return nil, nil
}
if log.GetLevel() >= log.DebugLevel {
log.Debugf("Found cookie for %s; %s: %s", r.URL, sessionTokenName, cookie.Value)
}
token, err := jwt.Parse(cookie.Value, func(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return m.hmac, nil
})
if err != nil {
return nil, err
} else if claims, ok := token.Claims.(jwt.MapClaims); ok {
st := &SessionToken{Token: token, claims: claims}
if log.GetLevel() >= log.DebugLevel {
log.Debugf("Resolved session token %v", st)
}
return st, nil
}
return nil, fmt.Errorf("Failed to parse token claims")
}
include groups in user info
package auth
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"sync"
jwt "github.com/dgrijalva/jwt-go"
uuid "github.com/nu7hatch/gouuid"
log "github.com/sirupsen/logrus"
)
const (
sessionTokenName = "kuill"
claimExpires = "exp"
claimNotBefore = "nbf"
claimCSRFToken = "csrf"
claimUserID = "uid"
claimGroups = "grp"
)
// Context is a holder for the currently authenticated user's information
type Context interface {
User() string
Groups() []string
}
// Authenticator is a pluggable interface for authentication providers
type Authenticator interface {
// Name returns the url-safe unique name of this authenticator
Name() string
// Description returns the user-friendly description of this authenticator
Description() string
// Type returns the type of this authenticator
Type() string
// GetHandlers returns the handlers for this authenticator; the set of
// handlers must include a "login" handler which will be triggered
// GetHandlers() map[string]http.HandlerFunc
// LoginURL returns the initial login URL for this handler
Authenticate(w http.ResponseWriter, r *http.Request) (*SessionToken, error)
LoginURL() string
// IconURL returns an icon URL to signify this login method; empty string implies a default can be used
IconURL() string
// PostWithCredentials returns true if this authenticator expects username/password credentials be POST'd
PostWithCredentials() bool
}
// Delegate is a function which requires user and group information along with
// the standard http request/response parameters
type Delegate func(w http.ResponseWriter, r *http.Request, authentication Context)
// NewAuthDelegate returns a new http.Handler func which delegates to the
// provided AuthDelegate, passing the resolved user,group information form
// the session where found, otherwise returning 401 Unauthorized.
func (m *Manager) NewAuthDelegate(delegate Delegate) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
session, err := m.ParseSessionToken(r)
if session == nil {
if err != nil {
log.Warnf("Authentication error: %v", err)
} else {
log.Infof("No existing/valid session")
}
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
m.keepSessionAlive(session, w)
delegate(w, r, session)
}
}
// Manager manages supported authentication mechanisms
type Manager struct {
authenticators map[string]Authenticator
loginMethodsResponse []byte
mutex sync.Mutex
hmac []byte
}
// NewAuthManager creates a new authentication manager instance
func NewAuthManager() (*Manager, error) {
hmc, _ := uuid.NewV4()
m := &Manager{
authenticators: make(map[string]Authenticator),
hmac: []byte(hmc.String()),
}
m.loginMethodsResponse, _ = m.buildLoginMethodsResponse()
http.HandleFunc("/auth/login_methods", m.listLoginMethods)
http.HandleFunc("/auth/user_info", m.userInfo)
return m, nil
}
func (m *Manager) buildLoginMethodsResponse() ([]byte, error) {
loginMethods := []map[string]interface{}{}
for id, authN := range m.authenticators {
loginMethods = append(loginMethods, map[string]interface{}{
"id": id,
"name": authN.Name(),
"desc": authN.Description(),
"type": authN.Type(),
"url": authN.LoginURL(),
"icon": authN.IconURL(),
"post_creds": authN.PostWithCredentials(),
})
}
resp := map[string]interface{}{
"login_methods": loginMethods,
}
return json.Marshal(&resp)
}
// RegisterAuthenticator registers an authentication provider
func (m *Manager) RegisterAuthenticator(authn Authenticator) error {
key := fmt.Sprintf("%s:%s", authn.Type(), authn.Name())
m.mutex.Lock()
defer m.mutex.Unlock()
if _, ok := m.authenticators[key]; ok {
return fmt.Errorf("An authenticator already exists with key '%s'", key)
}
m.authenticators[key] = authn
var handlerFunc http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
sessionToken, err := authn.Authenticate(w, r)
if err != nil {
log.Error(err)
} else if sessionToken != nil {
m.completeAuthentication(sessionToken, w, r)
}
}
http.HandleFunc(authn.LoginURL(), handlerFunc)
var err error
m.loginMethodsResponse, err = m.buildLoginMethodsResponse()
if err != nil {
return fmt.Errorf("Problem marshalling cached authenticators response: %v", err)
}
log.Infof("Enabled authenticator: %s => %s", key, authn.LoginURL())
return nil
}
// listLoginMethods writes the cached JSON description of all registered
// login methods. Only GET is allowed.
//
// NOTE(review): m.loginMethodsResponse is rebuilt under m.mutex in
// RegisterAuthenticator but read here without locking — confirm that all
// registrations happen before the server starts serving requests.
func (m *Manager) listLoginMethods(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// BUG FIX: the missing return meant the response body was still
		// written (with a 405 status) for non-GET requests.
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(m.loginMethodsResponse)))
	w.Write(m.loginMethodsResponse)
}
// userInfo is the JSON payload returned by the /auth/user_info endpoint.
type userInfo struct {
	User string `json:"user,omitempty"`
	// BUG FIX: Groups must be []string — respondWithUserInfo assigns
	// session.Groups() (a []string) to it directly; declaring it as
	// string was a compile error.
	Groups         []string `json:"groups,omitempty"`
	SessionExpires string   `json:"session_expires,omitempty"`
}
// userInfo handles GET /auth/user_info: it resolves the session from the
// request cookie, renews it if needed, and responds with the user details.
func (m *Manager) userInfo(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// BUG FIX: the missing return meant the handler continued and
		// wrote a second response body after the 405.
		return
	}
	// ParseSessionToken's error is deliberately ignored: a nil session is
	// handled by respondWithUserInfo (empty payload).
	session, _ := m.ParseSessionToken(r)
	m.keepSessionAlive(session, w)
	m.respondWithUserInfo(session, w)
}
func (m *Manager) respondWithUserInfo(session *SessionToken, w http.ResponseWriter) {
var resp userInfo
if session != nil {
resp.User = session.User()
resp.Groups = session.Groups()
resp.SessionExpires = time.Unix(session.Expires(), 0).Format(time.RFC3339)
}
data, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Failed marshalling user response", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
w.Write(data)
}
// keepSessionAlive renews the session cookie when the session is valid but
// due to expire within the next minute.
func (m *Manager) keepSessionAlive(session *SessionToken, w http.ResponseWriter) {
	if session == nil || !session.Valid {
		return
	}
	// BUG FIX: renew when the token expires in less than 1 minute. The
	// original comparison used now-60s, which only matched tokens already
	// expired for over a minute and so never renewed a live session.
	if session.Expires() >= time.Now().Unix()+int64(time.Minute.Seconds()) {
		return
	}
	oldExpires := session.Expires()
	// BUG FIX: carry over the existing claims but drop the timing claims;
	// previously the stale "exp"/"nbf" in session.claims overrode the
	// fresh ones set by NewSessionToken, making the renewal a no-op.
	extra := make(map[string]interface{}, len(session.claims))
	for k, v := range session.claims {
		if k != claimExpires && k != claimNotBefore {
			extra[k] = v
		}
	}
	session = NewSessionToken(session.User(), session.Groups(), extra)
	m.writeSessionCookie(session, w)
	if log.GetLevel() >= log.DebugLevel {
		// BUG FIX: the original Debugf had three %-verbs but only two
		// arguments, producing a %!d(MISSING) in the log line.
		log.Debugf("Renewed session for %s: expires %d -> %d", session.User(), oldExpires, session.Expires())
	}
}
func (m *Manager) writeSessionCookie(session *SessionToken, w http.ResponseWriter) {
signedJWT, err := session.SignedString(m.hmac)
if err != nil {
http.Error(w, "Failed to sign JWT: "+err.Error(), http.StatusInternalServerError)
return
}
http.SetCookie(w, &http.Cookie{
Name: sessionTokenName,
Value: signedJWT,
HttpOnly: true,
Path: "/",
})
}
// TODO: this should be re-worked so that it wraps authenticators' authenticate methods
func (m *Manager) completeAuthentication(session *SessionToken, w http.ResponseWriter, r *http.Request) {
m.writeSessionCookie(session, w)
targetURL := r.URL.Query().Get("target")
if len(targetURL) > 0 {
http.Redirect(w, r, targetURL, http.StatusFound)
} else {
m.respondWithUserInfo(session, w)
}
}
// SessionToken is a wrapper around JWT with methods for easy user/group access
type SessionToken struct {
*jwt.Token
claims jwt.MapClaims
}
// User recovers the userID from a session token
func (s *SessionToken) User() string {
return s.claims[claimUserID].(string)
}
// Expires returns the expiration of the token in Unix time, the number of seconds elapsed
// since January 1, 1970 UTC.
func (s *SessionToken) Expires() int64 {
expires := s.claims[claimExpires]
if intVal, ok := expires.(int64); ok {
return intVal
}
return int64(expires.(float64))
}
// Groups recovers the groups from a session token
func (s *SessionToken) Groups() []string {
return strings.Split(s.claims[claimGroups].(string), ",")
}
// NewSessionToken generates a new auth token suitable for storing user session state
func NewSessionToken(user string, groups []string, additionalClaims map[string]interface{}) *SessionToken {
csrfToken, _ := uuid.NewV4()
claims := jwt.MapClaims{
claimNotBefore: time.Now().Unix(),
claimExpires: time.Now().Add(time.Minute * 15).Unix(),
claimCSRFToken: csrfToken.String(),
claimUserID: user,
claimGroups: strings.Join(groups, ","),
}
if additionalClaims != nil {
for k, v := range additionalClaims {
claims[k] = v
}
}
return &SessionToken{Token: jwt.NewWithClaims(jwt.SigningMethodHS256, claims), claims: claims}
}
// ParseSessionToken recovers the session
func (m *Manager) ParseSessionToken(r *http.Request) (*SessionToken, error) {
cookie, err := r.Cookie(sessionTokenName)
if cookie == nil || err != nil {
return nil, nil
}
if log.GetLevel() >= log.DebugLevel {
log.Debugf("Found cookie for %s; %s: %s", r.URL, sessionTokenName, cookie.Value)
}
token, err := jwt.Parse(cookie.Value, func(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return m.hmac, nil
})
if err != nil {
return nil, err
} else if claims, ok := token.Claims.(jwt.MapClaims); ok {
st := &SessionToken{Token: token, claims: claims}
if log.GetLevel() >= log.DebugLevel {
log.Debugf("Resolved session token %v", st)
}
return st, nil
}
return nil, fmt.Errorf("Failed to parse token claims")
}
|
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"path/filepath"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/skatteetaten/ao/pkg/prompt"
)
var ocp3URLPatterns = &ServiceURLPatterns{
ClusterURLPattern: "https://%s-master.paas.skead.no:8443",
BooberURLPattern: "http://boober-aurora.%s.paas.skead.no",
UpdateURLPattern: "http://ao-aurora-tools.%s.paas.skead.no",
GoboURLPattern: "http://gobo.aurora.%s.paas.skead.no",
}
var ocp4URLPatterns = &ServiceURLPatterns{
ClusterURLPattern: "https://api.%s.paas.skead.no:6443",
ClusterLoginURLPattern: "https://oauth-openshift.apps.%s.paas.skead.no",
BooberURLPattern: "https://boober-aup.apps.%s.paas.skead.no",
UpdateURLPattern: "https://ao-aup.apps.%s.paas.skead.no",
GoboURLPattern: "https://gobo-aup.apps.%s.paas.skead.no",
}
// ClusterConfig information about features and configuration for a cluster.
type ClusterConfig struct {
Type string `json:"type"`
IsAPICluster bool `json:"isApiCluster"`
IsUpdateCluster bool `json:"isUpdateCluster"`
ClusterURLPrefix string `json:"clusterUrlPrefix"`
}
// ServiceURLPatterns contains url patterns for all integrations made with AO.
// %s will be replaced with cluster name. If ClusterURLPrefix in ClusterConfig is specified
// it will be used for ClusterURLPattern and ClusterLoginURLPattern insted of cluster name.
type ServiceURLPatterns struct {
ClusterURLPattern string `json:"clusterUrlPattern"`
ClusterLoginURLPattern string `json:"clusterLoginUrlPattern"`
BooberURLPattern string `json:"booberUrlPattern"`
UpdateURLPattern string `json:"updateUrlPattern"`
GoboURLPattern string `json:"goboUrlPattern"`
}
// ServiceURLs contains all the necessary URLs for integrations made with AO.
type ServiceURLs struct {
BooberURL string
ClusterURL string
ClusterLoginURL string
GoboURL string
}
// AOConfig is a structure of the configuration of ao
type AOConfig struct {
RefName string `json:"refName"`
APICluster string `json:"apiCluster"`
Affiliation string `json:"affiliation"`
Localhost bool `json:"localhost"`
Clusters map[string]*Cluster `json:"clusters"`
ServiceURLPatterns map[string]*ServiceURLPatterns `json:"serviceURLPatterns"`
ClusterConfig map[string]*ClusterConfig `json:"clusterConfig"`
AvailableClusters []string `json:"availableClusters"`
PreferredAPIClusters []string `json:"preferredApiClusters"`
AvailableUpdateClusters []string `json:"availableUpdateClusters"`
ClusterURLPattern string `json:"clusterUrlPattern"`
BooberURLPattern string `json:"booberUrlPattern"`
UpdateURLPattern string `json:"updateUrlPattern"`
GoboURLPattern string `json:"goboUrlPattern"`
FileAOVersion string `json:"aoVersion"` // For detecting possible changes to saved file
}
// DefaultAOConfig is an AOConfig with default values
var DefaultAOConfig = AOConfig{
RefName: "master",
Clusters: make(map[string]*Cluster),
AvailableClusters: []string{"utv", "utv-relay", "test", "test-relay", "prod", "prod-relay"},
PreferredAPIClusters: []string{"utv", "test"},
AvailableUpdateClusters: []string{"utv", "test"},
ClusterURLPattern: ocp3URLPatterns.ClusterURLPattern,
BooberURLPattern: ocp3URLPatterns.BooberURLPattern,
UpdateURLPattern: ocp3URLPatterns.UpdateURLPattern,
GoboURLPattern: ocp3URLPatterns.GoboURLPattern,
FileAOVersion: Version,
}
// GetServiceURLs returns old config if ServiceURLPatterns is empty, else ServiceURLs for a given cluster type
func (ao *AOConfig) GetServiceURLs(clusterName string) (*ServiceURLs, error) {
if len(ao.ServiceURLPatterns) == 0 {
return &ServiceURLs{
BooberURL: fmt.Sprintf(ao.BooberURLPattern, clusterName),
ClusterURL: fmt.Sprintf(ao.ClusterURLPattern, clusterName),
ClusterLoginURL: fmt.Sprintf(ao.ClusterURLPattern, clusterName),
GoboURL: fmt.Sprintf(ao.GoboURLPattern, clusterName),
}, nil
}
clusterConfig := ao.ClusterConfig[clusterName]
if clusterConfig == nil || clusterConfig.Type == "" {
return nil, errors.Errorf("missing cluster type for cluster %s", clusterName)
}
patterns := ao.ServiceURLPatterns[clusterConfig.Type]
if patterns == nil {
return nil, errors.Errorf("missing serviceUrlPatterns for cluster type %s", clusterConfig.Type)
}
clusterPrefix := clusterName
if clusterConfig.ClusterURLPrefix != "" {
clusterPrefix = clusterConfig.ClusterURLPrefix
}
clusterLoginURLPattern := patterns.ClusterURLPattern
if patterns.ClusterLoginURLPattern != "" {
clusterLoginURLPattern = patterns.ClusterLoginURLPattern
}
return &ServiceURLs{
BooberURL: formatNonLocalhostPattern(patterns.BooberURLPattern, clusterName),
ClusterURL: formatNonLocalhostPattern(patterns.ClusterURLPattern, clusterPrefix),
ClusterLoginURL: formatNonLocalhostPattern(clusterLoginURLPattern, clusterPrefix),
GoboURL: formatNonLocalhostPattern(patterns.GoboURLPattern, clusterName),
}, nil
}
// AddMultipleClusterConfig adds a richer cluster configuration for multiple cluster types.
func (ao *AOConfig) AddMultipleClusterConfig() {
newCluster := "utv04"
ao.AvailableClusters = append(ao.AvailableClusters, newCluster)
ao.AvailableUpdateClusters = append([]string{newCluster}, ao.AvailableUpdateClusters...)
ao.ClusterConfig = map[string]*ClusterConfig{
"utv": {
Type: "ocp3",
},
"utv-relay": {
Type: "ocp3",
},
"test": {
Type: "ocp3",
},
"test-relay": {
Type: "ocp3",
},
"prod": {
Type: "ocp3",
},
"prod-relay": {
Type: "ocp3",
},
newCluster: {
Type: "ocp4",
},
}
ao.ServiceURLPatterns = map[string]*ServiceURLPatterns{
"ocp3": ocp3URLPatterns,
"ocp4": ocp4URLPatterns,
}
}
// formatNonLocalhostPattern expands pattern with the given arguments via
// fmt.Sprintf, except that a pattern targeting localhost is returned
// untouched (localhost URLs carry no %s cluster placeholder).
func formatNonLocalhostPattern(pattern string, a ...interface{}) string {
	isLocalhost := strings.Contains(pattern, "localhost")
	if isLocalhost {
		return pattern
	}
	return fmt.Sprintf(pattern, a...)
}
// LoadConfigFile loads an AOConfig file from file system
func LoadConfigFile(configLocation string) (*AOConfig, error) {
raw, err := ioutil.ReadFile(configLocation)
if err != nil {
return nil, err
}
var c *AOConfig
err = json.Unmarshal(raw, &c)
if err != nil {
return nil, err
}
return c, nil
}
// WriteConfig writes an AOConfig file to file system
func WriteConfig(ao AOConfig, configLocation string) error {
data, err := json.MarshalIndent(ao, "", " ")
if err != nil {
return fmt.Errorf("While marshaling ao config: %w", err)
}
if err := ioutil.WriteFile(configLocation, data, 0644); err != nil {
return fmt.Errorf("While writing ao config to file: %w", err)
}
return nil
}
// SelectAPICluster returns specified APICluster or makes a priority based selection of an APICluster
func (ao *AOConfig) SelectAPICluster() {
	// Respect an explicitly configured API cluster.
	if ao.APICluster != "" {
		return
	}
	// First choice: the preferred clusters, tried in their listed order.
	for _, name := range ao.PreferredAPIClusters {
		cluster, found := ao.Clusters[name]
		if !found {
			continue
		}
		if cluster.Reachable {
			ao.APICluster = name
			return
		}
	}
	// Fallback: any reachable cluster. NOTE(review): Go map iteration
	// order is random, so this pick is nondeterministic when more than
	// one cluster is reachable — confirm that is acceptable here.
	for k, cluster := range ao.Clusters {
		if cluster.Reachable {
			ao.APICluster = k
			return
		}
	}
}
// Update checks for a new version of ao and performs update with an optional interactive confirmation
func (ao *AOConfig) Update(noPrompt bool) error {
url, err := ao.getUpdateURL()
if err != nil {
return err
}
serverVersion, err := GetCurrentVersionFromServer(url)
if err != nil {
return err
}
if !serverVersion.IsNewVersion() {
return errors.New("No update available")
}
if !noPrompt {
if runtime.GOOS == "windows" {
message := fmt.Sprintf("New version of AO is available (%s) - please download from %s", serverVersion.Version, url)
fmt.Println(message)
return nil
}
message := fmt.Sprintf("Do you want to update AO from version %s -> %s?", Version, serverVersion.Version)
update := prompt.Confirm(message, true)
if !update {
return errors.New("Update aborted")
}
}
data, err := GetNewAOClient(url)
if err != nil {
return err
}
err = ao.replaceAO(data)
if err != nil {
return err
}
return nil
}
// replaceAO writes the downloaded binary to a staging file and renames it
// over the currently running executable.
func (ao *AOConfig) replaceAO(data []byte) error {
    executablePath, err := os.Executable()
    if err != nil {
        return err
    }
    // First, try to stage the update next to the executable so the final
    // rename happens within a single file system.
    releasePath := executablePath + "_update"
    if err := ioutil.WriteFile(releasePath, data, 0755); err != nil {
        // Could not write to executable path, typically because binary is
        // installed in /usr/bin or /usr/local/bin. Try the OS temp dir.
        releasePath = filepath.Join(os.TempDir(), "ao_update")
        if err := ioutil.WriteFile(releasePath, data, 0755); err != nil {
            return err
        }
    }
    // os.Rename cannot move across file systems, which is the usual cause
    // of failure when the staging file ended up in the temp dir.
    if err := os.Rename(releasePath, executablePath); err != nil {
        return errors.New("Could not update AO because it is installed in a different file system than temp: " + err.Error())
    }
    return nil
}
// getUpdateURL returns the update-server URL of the first reachable
// cluster listed in AvailableUpdateClusters.
func (ao *AOConfig) getUpdateURL() (string, error) {
    for _, cluster := range ao.AvailableUpdateClusters {
        available, found := ao.Clusters[cluster]
        // Log the candidate as a structured field. Passing cluster as a
        // second argument to Info would fmt.Sprint-concatenate it into
        // the message with no separator ("update serverutv").
        logrus.WithField("cluster", cluster).WithField("exists", found).Info("update server")
        // Skip unknown or unreachable clusters (|| short-circuits, so
        // available is never dereferenced when the lookup failed).
        if !found || !available.Reachable {
            continue
        }
        updateURL, err := ao.resolveUpdateURLPattern(cluster)
        if err != nil {
            logrus.WithField("cluster", available.Name).Warn(err)
            continue
        }
        return updateURL, nil
    }
    return "", errors.New("could not find any available update servers")
}
// resolveUpdateURLPattern resolves the update URL for clusterName: legacy
// configs (no ServiceURLPatterns) use the flat UpdateURLPattern, newer
// configs look the pattern up via the cluster's configured type.
func (ao *AOConfig) resolveUpdateURLPattern(clusterName string) (string, error) {
    // Legacy config: one pattern applies to every cluster.
    if len(ao.ServiceURLPatterns) == 0 {
        return fmt.Sprintf(ao.UpdateURLPattern, clusterName), nil
    }
    clusterConfig := ao.ClusterConfig[clusterName]
    if clusterConfig == nil || clusterConfig.Type == "" {
        return "", errors.Errorf("missing cluster type for cluster %s", clusterName)
    }
    patterns := ao.ServiceURLPatterns[clusterConfig.Type]
    if patterns == nil {
        return "", errors.Errorf("missing serviceUrlPatterns for cluster type %s", clusterConfig.Type)
    }
    return formatNonLocalhostPattern(patterns.UpdateURLPattern, clusterName), nil
}
added more ocp4 clusters to beta config (#188)
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"path/filepath"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/skatteetaten/ao/pkg/prompt"
)
// ocp3URLPatterns holds the service URL templates for legacy OpenShift 3
// clusters; %s is substituted with the cluster name.
var ocp3URLPatterns = &ServiceURLPatterns{
    ClusterURLPattern: "https://%s-master.paas.skead.no:8443",
    BooberURLPattern:  "http://boober-aurora.%s.paas.skead.no",
    UpdateURLPattern:  "http://ao-aurora-tools.%s.paas.skead.no",
    GoboURLPattern:    "http://gobo.aurora.%s.paas.skead.no",
}

// ocp4URLPatterns holds the service URL templates for OpenShift 4
// clusters, including a dedicated OAuth login URL.
var ocp4URLPatterns = &ServiceURLPatterns{
    ClusterURLPattern:      "https://api.%s.paas.skead.no:6443",
    ClusterLoginURLPattern: "https://oauth-openshift.apps.%s.paas.skead.no",
    BooberURLPattern:       "https://boober-aup.apps.%s.paas.skead.no",
    UpdateURLPattern:       "https://ao-aup.apps.%s.paas.skead.no",
    GoboURLPattern:         "https://gobo-aup.apps.%s.paas.skead.no",
}
// ClusterConfig information about features and configuration for a cluster.
type ClusterConfig struct {
    // Type selects which ServiceURLPatterns entry applies (e.g. "ocp3", "ocp4").
    Type string `json:"type"`
    IsAPICluster    bool `json:"isApiCluster"`
    IsUpdateCluster bool `json:"isUpdateCluster"`
    // ClusterURLPrefix, when set, replaces the cluster name in the
    // cluster and login URL patterns (see GetServiceURLs).
    ClusterURLPrefix string `json:"clusterUrlPrefix"`
}
// ServiceURLPatterns contains url patterns for all integrations made with AO.
// %s will be replaced with cluster name. If ClusterURLPrefix in ClusterConfig is specified
// it will be used for ClusterURLPattern and ClusterLoginURLPattern instead of cluster name.
type ServiceURLPatterns struct {
    ClusterURLPattern      string `json:"clusterUrlPattern"`
    ClusterLoginURLPattern string `json:"clusterLoginUrlPattern"`
    BooberURLPattern       string `json:"booberUrlPattern"`
    UpdateURLPattern       string `json:"updateUrlPattern"`
    GoboURLPattern         string `json:"goboUrlPattern"`
}
// ServiceURLs contains all the necessary URLs for integrations made with AO.
// Values are fully resolved (no %s placeholders remain).
type ServiceURLs struct {
    BooberURL       string
    ClusterURL      string
    ClusterLoginURL string
    GoboURL         string
}
// AOConfig is a structure of the configuration of ao
type AOConfig struct {
    RefName     string `json:"refName"`
    // APICluster is the cluster used for API calls; filled in by
    // SelectAPICluster when empty.
    APICluster  string `json:"apiCluster"`
    Affiliation string `json:"affiliation"`
    Localhost   bool   `json:"localhost"`
    // Clusters holds per-cluster state (including reachability) keyed by name.
    Clusters map[string]*Cluster `json:"clusters"`
    // ServiceURLPatterns maps cluster type ("ocp3"/"ocp4") to URL templates;
    // when empty, the legacy flat patterns below are used instead.
    ServiceURLPatterns map[string]*ServiceURLPatterns `json:"serviceURLPatterns"`
    ClusterConfig      map[string]*ClusterConfig      `json:"clusterConfig"`
    AvailableClusters       []string `json:"availableClusters"`
    PreferredAPIClusters    []string `json:"preferredApiClusters"`
    AvailableUpdateClusters []string `json:"availableUpdateClusters"`
    // Legacy flat URL patterns, consulted only when ServiceURLPatterns is empty.
    ClusterURLPattern string `json:"clusterUrlPattern"`
    BooberURLPattern  string `json:"booberUrlPattern"`
    UpdateURLPattern  string `json:"updateUrlPattern"`
    GoboURLPattern    string `json:"goboUrlPattern"`
    FileAOVersion     string `json:"aoVersion"` // For detecting possible changes to saved file
}
// DefaultAOConfig is an AOConfig with default values:
// the standard cluster lists and the legacy ocp3 URL patterns.
var DefaultAOConfig = AOConfig{
    RefName:                 "master",
    Clusters:                make(map[string]*Cluster),
    AvailableClusters:       []string{"utv", "utv-relay", "test", "test-relay", "prod", "prod-relay"},
    PreferredAPIClusters:    []string{"utv", "test"},
    AvailableUpdateClusters: []string{"utv", "test"},
    ClusterURLPattern:       ocp3URLPatterns.ClusterURLPattern,
    BooberURLPattern:        ocp3URLPatterns.BooberURLPattern,
    UpdateURLPattern:        ocp3URLPatterns.UpdateURLPattern,
    GoboURLPattern:          ocp3URLPatterns.GoboURLPattern,
    FileAOVersion:           Version,
}
// GetServiceURLs returns old config if ServiceURLPatterns is empty, else ServiceURLs for a given cluster type
func (ao *AOConfig) GetServiceURLs(clusterName string) (*ServiceURLs, error) {
    // Legacy config: flat per-service patterns shared by all clusters.
    // Note the login URL reuses the cluster URL pattern here.
    if len(ao.ServiceURLPatterns) == 0 {
        return &ServiceURLs{
            BooberURL:       fmt.Sprintf(ao.BooberURLPattern, clusterName),
            ClusterURL:      fmt.Sprintf(ao.ClusterURLPattern, clusterName),
            ClusterLoginURL: fmt.Sprintf(ao.ClusterURLPattern, clusterName),
            GoboURL:         fmt.Sprintf(ao.GoboURLPattern, clusterName),
        }, nil
    }
    clusterConfig := ao.ClusterConfig[clusterName]
    if clusterConfig == nil || clusterConfig.Type == "" {
        return nil, errors.Errorf("missing cluster type for cluster %s", clusterName)
    }
    patterns := ao.ServiceURLPatterns[clusterConfig.Type]
    if patterns == nil {
        return nil, errors.Errorf("missing serviceUrlPatterns for cluster type %s", clusterConfig.Type)
    }
    // Cluster and login URLs may substitute an alternate prefix instead
    // of the cluster name.
    clusterPrefix := clusterName
    if clusterConfig.ClusterURLPrefix != "" {
        clusterPrefix = clusterConfig.ClusterURLPrefix
    }
    // Fall back to the cluster URL pattern when no dedicated login
    // pattern is configured.
    clusterLoginURLPattern := patterns.ClusterURLPattern
    if patterns.ClusterLoginURLPattern != "" {
        clusterLoginURLPattern = patterns.ClusterLoginURLPattern
    }
    return &ServiceURLs{
        BooberURL:       formatNonLocalhostPattern(patterns.BooberURLPattern, clusterName),
        ClusterURL:      formatNonLocalhostPattern(patterns.ClusterURLPattern, clusterPrefix),
        ClusterLoginURL: formatNonLocalhostPattern(clusterLoginURLPattern, clusterPrefix),
        GoboURL:         formatNonLocalhostPattern(patterns.GoboURLPattern, clusterName),
    }, nil
}
// AddMultipleClusterConfig adds a richer cluster configuration for multiple cluster types.
// It registers the classic ocp3 clusters, prepends the ocp4 clusters to the
// preferred/update lists, and installs the per-type URL patterns.
func (ao *AOConfig) AddMultipleClusterConfig() {
    ocp3Clusters := []string{"utv", "utv-relay", "test", "test-relay", "prod", "prod-relay"}
    ao.ClusterConfig = make(map[string]*ClusterConfig, len(ocp3Clusters)+3)
    for _, cluster := range ocp3Clusters {
        ao.ClusterConfig[cluster] = &ClusterConfig{Type: "ocp3"}
    }
    for _, cluster := range []string{"prod01", "test01", "utv04"} {
        ao.AvailableClusters = append(ao.AvailableClusters, cluster)
        ao.PreferredAPIClusters = append([]string{cluster}, ao.PreferredAPIClusters...)
        ao.AvailableUpdateClusters = append([]string{cluster}, ao.AvailableUpdateClusters...)
        ao.ClusterConfig[cluster] = &ClusterConfig{Type: "ocp4"}
    }
    ao.ServiceURLPatterns = map[string]*ServiceURLPatterns{
        "ocp3": ocp3URLPatterns,
        "ocp4": ocp4URLPatterns,
    }
}
// formatNonLocalhostPattern expands pattern with fmt.Sprintf, except that
// patterns targeting localhost are returned verbatim.
func formatNonLocalhostPattern(pattern string, a ...interface{}) string {
    if !strings.Contains(pattern, "localhost") {
        return fmt.Sprintf(pattern, a...)
    }
    return pattern
}
// LoadConfigFile loads an AOConfig file from file system.
// It returns an error if the file cannot be read or is not valid JSON.
func LoadConfigFile(configLocation string) (*AOConfig, error) {
    raw, err := ioutil.ReadFile(configLocation)
    if err != nil {
        return nil, err
    }
    // Unmarshal into a value, not a *AOConfig: with a pointer target a
    // file containing JSON "null" would make this return (nil, nil),
    // handing callers a nil config with no error.
    var c AOConfig
    if err := json.Unmarshal(raw, &c); err != nil {
        return nil, err
    }
    return &c, nil
}
// WriteConfig writes an AOConfig file to file system
// as indented JSON at configLocation (mode 0644).
func WriteConfig(ao AOConfig, configLocation string) error {
    data, err := json.MarshalIndent(ao, "", " ")
    if err != nil {
        return fmt.Errorf("While marshaling ao config: %w", err)
    }
    if err := ioutil.WriteFile(configLocation, data, 0644); err != nil {
        return fmt.Errorf("While writing ao config to file: %w", err)
    }
    return nil
}
// SelectAPICluster sets ao.APICluster: an explicitly configured value is
// kept, otherwise the first reachable cluster from PreferredAPIClusters
// is chosen, and as a last resort any reachable cluster.
func (ao *AOConfig) SelectAPICluster() {
    // Respect an explicitly configured API cluster.
    if ao.APICluster != "" {
        return
    }
    // Pick the first reachable cluster in configured priority order.
    for _, name := range ao.PreferredAPIClusters {
        cluster, found := ao.Clusters[name]
        if !found {
            continue
        }
        if cluster.Reachable {
            ao.APICluster = name
            return
        }
    }
    // Fall back to any reachable cluster. NOTE(review): Go map iteration
    // order is random, so this fallback selection is nondeterministic.
    for k, cluster := range ao.Clusters {
        if cluster.Reachable {
            ao.APICluster = k
            return
        }
    }
}
// Update checks for a new version of ao and performs update with an optional interactive confirmation.
// With noPrompt set it updates unconditionally; on Windows it only prints
// the download URL since the running binary cannot replace itself here.
func (ao *AOConfig) Update(noPrompt bool) error {
    url, err := ao.getUpdateURL()
    if err != nil {
        return err
    }
    serverVersion, err := GetCurrentVersionFromServer(url)
    if err != nil {
        return err
    }
    if !serverVersion.IsNewVersion() {
        return errors.New("No update available")
    }
    if !noPrompt {
        if runtime.GOOS == "windows" {
            // No in-place self-update on Windows; point the user at the URL.
            message := fmt.Sprintf("New version of AO is available (%s) - please download from %s", serverVersion.Version, url)
            fmt.Println(message)
            return nil
        }
        message := fmt.Sprintf("Do you want to update AO from version %s -> %s?", Version, serverVersion.Version)
        update := prompt.Confirm(message, true)
        if !update {
            return errors.New("Update aborted")
        }
    }
    // Download the new binary and swap it in place of the running one.
    data, err := GetNewAOClient(url)
    if err != nil {
        return err
    }
    err = ao.replaceAO(data)
    if err != nil {
        return err
    }
    return nil
}
// replaceAO stages the downloaded binary on disk and renames it over the
// currently running executable.
func (ao *AOConfig) replaceAO(data []byte) error {
    executablePath, err := os.Executable()
    if err != nil {
        return err
    }
    var releasePath string
    // First, we try to write the update to a file in the executable path
    releasePath = executablePath + "_" + "update"
    err = ioutil.WriteFile(releasePath, data, 0755)
    if err != nil {
        // Could not write to executable path, typically because binary is installed in /usr/bin or /usr/local/bin
        // Try the OS Temp Dir
        releasePath = filepath.Join(os.TempDir(), "ao_update")
        err = ioutil.WriteFile(releasePath, data, 0755)
        if err != nil {
            return err
        }
    }
    // os.Rename cannot move across file systems, which is the usual cause
    // of failure when the staging file ended up in the temp dir.
    err = os.Rename(releasePath, executablePath)
    if err != nil {
        err = errors.New("Could not update AO because it is installed in a different file system than temp: " + err.Error())
        return err
    }
    return nil
}
// getUpdateURL returns the update-server URL of the first reachable
// cluster listed in AvailableUpdateClusters.
func (ao *AOConfig) getUpdateURL() (string, error) {
    for _, cluster := range ao.AvailableUpdateClusters {
        available, found := ao.Clusters[cluster]
        // Log the candidate as a structured field. Passing cluster as a
        // second argument to Info would fmt.Sprint-concatenate it into
        // the message with no separator ("update serverutv").
        logrus.WithField("cluster", cluster).WithField("exists", found).Info("update server")
        // Skip unknown or unreachable clusters (|| short-circuits, so
        // available is never dereferenced when the lookup failed).
        if !found || !available.Reachable {
            continue
        }
        updateURL, err := ao.resolveUpdateURLPattern(cluster)
        if err != nil {
            logrus.WithField("cluster", available.Name).Warn(err)
            continue
        }
        return updateURL, nil
    }
    return "", errors.New("could not find any available update servers")
}
// resolveUpdateURLPattern resolves the update URL for clusterName: legacy
// configs (no ServiceURLPatterns) use the flat UpdateURLPattern, newer
// configs look the pattern up via the cluster's configured type.
func (ao *AOConfig) resolveUpdateURLPattern(clusterName string) (string, error) {
    // Legacy config: one pattern applies to every cluster.
    if len(ao.ServiceURLPatterns) == 0 {
        return fmt.Sprintf(ao.UpdateURLPattern, clusterName), nil
    }
    clusterConfig := ao.ClusterConfig[clusterName]
    if clusterConfig == nil || clusterConfig.Type == "" {
        return "", errors.Errorf("missing cluster type for cluster %s", clusterName)
    }
    patterns := ao.ServiceURLPatterns[clusterConfig.Type]
    if patterns == nil {
        return "", errors.Errorf("missing serviceUrlPatterns for cluster type %s", clusterConfig.Type)
    }
    return formatNonLocalhostPattern(patterns.UpdateURLPattern, clusterName), nil
}
|
// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
//go:generate bash -c "echo '// AUTOGENERATED FILE' > generated.go"
//go:generate bash -c "echo 'package html' > generated.go"
//go:generate bash -c "echo 'const style = `' >> generated.go"
//go:generate bash -c "cat ../../dashboard/app/static/style.css >> generated.go"
//go:generate bash -c "echo '`' >> generated.go"
//go:generate bash -c "echo 'const js = `' >> generated.go"
//go:generate bash -c "cat ../../dashboard/app/static/common.js >> generated.go"
//go:generate bash -c "echo '`' >> generated.go"
package html
import (
"fmt"
"html/template"
"strings"
texttemplate "text/template"
"time"
"github.com/google/syzkaller/dashboard/dashapi"
)
// CreatePage parses page as an HTML template with Funcs attached,
// first substituting the {{HEAD}} placeholder with inline <style> and
// <script> tags holding the generated style and js constants.
func CreatePage(page string) *template.Template {
    const headTempl = `<style type="text/css" media="screen">%v</style><script>%v</script>`
    page = strings.Replace(page, "{{HEAD}}", fmt.Sprintf(headTempl, style, js), 1)
    return template.Must(template.New("").Funcs(Funcs).Parse(page))
}
// CreateGlob parses all HTML templates matching glob with Funcs attached.
// It panics (via template.Must) on parse errors.
func CreateGlob(glob string) *template.Template {
    return template.Must(template.New("").Funcs(Funcs).ParseGlob(glob))
}
// CreateTextGlob is the text/template counterpart of CreateGlob, reusing
// the same Funcs map (no HTML escaping is applied).
func CreateTextGlob(glob string) *texttemplate.Template {
    return texttemplate.Must(texttemplate.New("").Funcs(texttemplate.FuncMap(Funcs)).ParseGlob(glob))
}
// Funcs holds the formatting helpers shared by all dashboard templates.
var Funcs = template.FuncMap{
    "link":                   link,
    "optlink":                optlink,
    "formatTime":             FormatTime,
    "formatKernelTime":       formatKernelTime,
    "formatClock":            formatClock,
    "formatDuration":         formatDuration,
    "formatLateness":         formatLateness,
    "formatReproLevel":       formatReproLevel,
    "formatStat":             formatStat,
    "formatShortHash":        formatShortHash,
    "formatTagHash":          formatTagHash,
    "formatCommitTableTitle": formatCommitTableTitle,
    "formatList":             formatStringList,
}
// link renders text (HTML-escaped) as an anchor to url; with an empty
// url only the escaped text is returned.
func link(url, text string) template.HTML {
    escaped := template.HTMLEscapeString(text)
    if url == "" {
        return template.HTML(escaped)
    }
    return template.HTML(fmt.Sprintf(`<a href="%v">%v</a>`, url, escaped))
}
// optlink behaves like link, except that an empty url renders nothing
// at all instead of bare text.
func optlink(url, text string) template.HTML {
    if url != "" {
        return link(url, text)
    }
    return template.HTML("")
}
func FormatTime(t time.Time) string {
if t.IsZero() {
return ""
}
return t.Format("2006/01/02 15:04")
}
func formatKernelTime(t time.Time) string {
if t.IsZero() {
return ""
}
// This is how dates appear in git log.
return t.Format("Mon Jan 2 15:04:05 2006 -0700")
}
func formatClock(t time.Time) string {
if t.IsZero() {
return ""
}
return t.Format("15:04")
}
func formatDuration(d time.Duration) string {
if d == 0 {
return ""
}
days := int(d / (24 * time.Hour))
hours := int(d / time.Hour % 24)
mins := int(d / time.Minute % 60)
if days >= 10 {
return fmt.Sprintf("%vd", days)
} else if days != 0 {
return fmt.Sprintf("%vd%02vh", days, hours)
} else if hours != 0 {
return fmt.Sprintf("%vh%02vm", hours, mins)
}
return fmt.Sprintf("%vm", mins)
}
// formatLateness describes how long ago t happened relative to now:
// "never" for the zero time, "now" within five minutes, otherwise a
// compact duration string.
func formatLateness(now, t time.Time) string {
    switch {
    case t.IsZero():
        return "never"
    case now.Sub(t) < 5*time.Minute:
        return "now"
    }
    return formatDuration(now.Sub(t))
}
// formatReproLevel renders a reproducer level as a short display label,
// or "" for levels without one (e.g. none).
func formatReproLevel(l dashapi.ReproLevel) string {
    switch l {
    case dashapi.ReproLevelSyz:
        return "syz"
    case dashapi.ReproLevelC:
        return "C"
    default:
        return ""
    }
}
// formatStat renders a counter value, hiding zeros as an empty cell.
func formatStat(v int64) string {
    if v != 0 {
        return fmt.Sprint(v)
    }
    return ""
}
}
func formatShortHash(v string) string {
const hashLen = 8
if len(v) <= hashLen {
return v
}
return v[:hashLen]
}
// formatTagHash truncates a commit hash for use in a Fixes/References tag.
// Note: such commit tags should include 12-char hash. Don't change this const.
func formatTagHash(v string) string {
    const hashLen = 12
    if len(v) > hashLen {
        return v[:hashLen]
    }
    return v
}
// formatCommitTableTitle truncates a commit title so a whole table line
// fits into 78 chars in text emails, appending ".." when it was cut.
// This function is very specific to how we format tables in text emails.
func formatCommitTableTitle(v string) string {
    const commitTitleLen = 51
    if len(v) <= commitTitleLen {
        return v
    }
    // Back off to a rune boundary so a multibyte character is never cut
    // in half (0x80..0xBF are UTF-8 continuation bytes), which would
    // emit invalid UTF-8.
    cut := commitTitleLen - 2
    for cut > 0 && v[cut]&0xC0 == 0x80 {
        cut--
    }
    return v[:cut] + ".."
}
// formatStringList joins list entries with a comma-space separator.
func formatStringList(list []string) string {
    return strings.Join(list, ", ")
}
pkg/html: add reference for 12-char hashes in tags
// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
//go:generate bash -c "echo '// AUTOGENERATED FILE' > generated.go"
//go:generate bash -c "echo 'package html' > generated.go"
//go:generate bash -c "echo 'const style = `' >> generated.go"
//go:generate bash -c "cat ../../dashboard/app/static/style.css >> generated.go"
//go:generate bash -c "echo '`' >> generated.go"
//go:generate bash -c "echo 'const js = `' >> generated.go"
//go:generate bash -c "cat ../../dashboard/app/static/common.js >> generated.go"
//go:generate bash -c "echo '`' >> generated.go"
package html
import (
"fmt"
"html/template"
"strings"
texttemplate "text/template"
"time"
"github.com/google/syzkaller/dashboard/dashapi"
)
func CreatePage(page string) *template.Template {
const headTempl = `<style type="text/css" media="screen">%v</style><script>%v</script>`
page = strings.Replace(page, "{{HEAD}}", fmt.Sprintf(headTempl, style, js), 1)
return template.Must(template.New("").Funcs(Funcs).Parse(page))
}
func CreateGlob(glob string) *template.Template {
return template.Must(template.New("").Funcs(Funcs).ParseGlob(glob))
}
func CreateTextGlob(glob string) *texttemplate.Template {
return texttemplate.Must(texttemplate.New("").Funcs(texttemplate.FuncMap(Funcs)).ParseGlob(glob))
}
var Funcs = template.FuncMap{
"link": link,
"optlink": optlink,
"formatTime": FormatTime,
"formatKernelTime": formatKernelTime,
"formatClock": formatClock,
"formatDuration": formatDuration,
"formatLateness": formatLateness,
"formatReproLevel": formatReproLevel,
"formatStat": formatStat,
"formatShortHash": formatShortHash,
"formatTagHash": formatTagHash,
"formatCommitTableTitle": formatCommitTableTitle,
"formatList": formatStringList,
}
// link renders text (HTML-escaped) as an anchor to url; with an empty
// url only the escaped text is returned.
func link(url, text string) template.HTML {
    text = template.HTMLEscapeString(text)
    if url != "" {
        text = fmt.Sprintf(`<a href="%v">%v</a>`, url, text)
    }
    return template.HTML(text)
}
func optlink(url, text string) template.HTML {
if url == "" {
return template.HTML("")
}
return link(url, text)
}
func FormatTime(t time.Time) string {
if t.IsZero() {
return ""
}
return t.Format("2006/01/02 15:04")
}
func formatKernelTime(t time.Time) string {
if t.IsZero() {
return ""
}
// This is how dates appear in git log.
return t.Format("Mon Jan 2 15:04:05 2006 -0700")
}
func formatClock(t time.Time) string {
if t.IsZero() {
return ""
}
return t.Format("15:04")
}
// formatDuration renders d compactly using its two most significant
// units of days/hours/minutes: "" for zero, "11d", "1d02h", "3h07m",
// or "42m".
func formatDuration(d time.Duration) string {
    if d == 0 {
        return ""
    }
    days := int(d / (24 * time.Hour))
    hours := int(d / time.Hour % 24)
    mins := int(d / time.Minute % 60)
    if days >= 10 {
        return fmt.Sprintf("%vd", days)
    } else if days != 0 {
        return fmt.Sprintf("%vd%02vh", days, hours)
    } else if hours != 0 {
        return fmt.Sprintf("%vh%02vm", hours, mins)
    }
    return fmt.Sprintf("%vm", mins)
}
}
func formatLateness(now, t time.Time) string {
if t.IsZero() {
return "never"
}
d := now.Sub(t)
if d < 5*time.Minute {
return "now"
}
return formatDuration(d)
}
func formatReproLevel(l dashapi.ReproLevel) string {
switch l {
case dashapi.ReproLevelSyz:
return "syz"
case dashapi.ReproLevelC:
return "C"
default:
return ""
}
}
func formatStat(v int64) string {
if v == 0 {
return ""
}
return fmt.Sprint(v)
}
func formatShortHash(v string) string {
const hashLen = 8
if len(v) <= hashLen {
return v
}
return v[:hashLen]
}
func formatTagHash(v string) string {
// Note: Fixes/References commit tags should include 12-char hash
// (see Documentation/process/submitting-patches.rst). Don't change this const.
const hashLen = 12
if len(v) <= hashLen {
return v
}
return v[:hashLen]
}
// formatCommitTableTitle truncates a commit title so a whole table line
// fits into 78 chars in text emails, appending ".." when it was cut.
// This function is very specific to how we format tables in text emails.
func formatCommitTableTitle(v string) string {
    const commitTitleLen = 51
    if len(v) <= commitTitleLen {
        return v
    }
    // Back off to a rune boundary so a multibyte character is never cut
    // in half (0x80..0xBF are UTF-8 continuation bytes), which would
    // emit invalid UTF-8.
    cut := commitTitleLen - 2
    for cut > 0 && v[cut]&0xC0 == 0x80 {
        cut--
    }
    return v[:cut] + ".."
}
func formatStringList(list []string) string {
return strings.Join(list, ", ")
}
|
// +build linux
package fs
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// FreezerGroup implements the cgroup v1 "freezer" subsystem.
type FreezerGroup struct {
}

// Name returns the subsystem name used in cgroup paths.
func (s *FreezerGroup) Name() string {
    return "freezer"
}

// Apply joins the process d.pid to the freezer cgroup at path.
func (s *FreezerGroup) Apply(path string, d *cgroupData) error {
    return join(path, d.pid)
}
// Set transitions the freezer cgroup at path into the requested state.
// If freezing fails for any reason, the cgroup is thawed again before
// returning so it is never left stuck in FROZEN or FREEZING.
func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) (Err error) {
    switch cgroup.Resources.Freezer {
    case configs.Frozen:
        defer func() {
            if Err != nil {
                // Freezing failed, and it is bad and dangerous to leave
                // the cgroup in FROZEN or FREEZING state, so (try to)
                // thaw it back on every error return below.
                _ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
            }
        }()
        // As per older kernel docs (freezer-subsystem.txt before
        // kernel commit ef9fe980c6fcc1821), if FREEZING is seen,
        // userspace should either retry or thaw. While current
        // kernel cgroup v1 docs no longer mention a need to retry,
        // the kernel (tested on v5.4, Ubuntu 20.04) can't reliably
        // freeze a cgroup while new processes keep appearing in it
        // (either via fork/clone or by writing new PIDs to
        // cgroup.procs).
        //
        // The numbers below are chosen to have a decent chance to
        // succeed even in the worst case scenario (runc pause/unpause
        // with parallel runc exec).
        //
        // Adding any amount of sleep in between retries did not
        // increase the chances of successful freeze.
        for i := 0; i < 1000; i++ {
            if i%50 == 49 {
                // Briefly thawing the cgroup also helps.
                _ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
                time.Sleep(10 * time.Millisecond)
            }
            if err := fscommon.WriteFile(path, "freezer.state", string(configs.Frozen)); err != nil {
                return err
            }
            state, err := fscommon.ReadFile(path, "freezer.state")
            if err != nil {
                return err
            }
            state = strings.TrimSpace(state)
            switch state {
            case "FREEZING":
                continue
            case string(configs.Frozen):
                if i > 1 {
                    logrus.Debugf("frozen after %d retries", i)
                }
                return nil
            default:
                // should never happen
                return fmt.Errorf("unexpected state %s while freezing", strings.TrimSpace(state))
            }
        }
        // Despite our best efforts, it got stuck in FREEZING.
        // The deferred thaw above kicks in on this return.
        return errors.New("unable to freeze")
    case configs.Thawed:
        return fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
    case configs.Undefined:
        return nil
    default:
        return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
    }
}
// GetStats is a no-op: the freezer subsystem exposes no statistics.
func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
    return nil
}
// GetState reads freezer.state and maps it to a FreezerState. It retries
// while the cgroup reports the transient FREEZING state, so callers
// always observe a stable value.
func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) {
    for {
        state, err := fscommon.ReadFile(path, "freezer.state")
        if err != nil {
            // If the kernel is too old, then we just treat the freezer as
            // being in an "undefined" state.
            if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) {
                err = nil
            }
            return configs.Undefined, err
        }
        switch strings.TrimSpace(state) {
        case "THAWED":
            return configs.Thawed, nil
        case "FROZEN":
            return configs.Frozen, nil
        case "FREEZING":
            // Make sure we get a stable freezer state, so retry if the cgroup
            // is still undergoing freezing. This should be a temporary delay.
            time.Sleep(1 * time.Millisecond)
            continue
        default:
            return configs.Undefined, fmt.Errorf("unknown freezer.state %q", state)
        }
    }
}
libct/cg/fs/freezer: make sure to thaw on failure
Function (*FreezerGroup).Set has a few paths where it can return an
error. In any case, if an error is returned, we failed to freeze,
and we need to thaw to avoid leaving the cgroup in a stuck state.
Signed-off-by: Kir Kolyshkin <3a017b8ddb3f9cf3e4a59978b004111bdeb97f08@gmail.com>
// +build linux
package fs
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// FreezerGroup implements the cgroup v1 "freezer" subsystem.
type FreezerGroup struct {
}

// Name returns the subsystem name used in cgroup paths.
func (s *FreezerGroup) Name() string {
    return "freezer"
}

// Apply joins the process d.pid to the freezer cgroup at path.
func (s *FreezerGroup) Apply(path string, d *cgroupData) error {
    return join(path, d.pid)
}
func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) (Err error) {
switch cgroup.Resources.Freezer {
case configs.Frozen:
defer func() {
if Err != nil {
// Freezing failed, and it is bad and dangerous
// to leave the cgroup in FROZEN or FREEZING
// state, so (try to) thaw it back.
_ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
}
}()
// As per older kernel docs (freezer-subsystem.txt before
// kernel commit ef9fe980c6fcc1821), if FREEZING is seen,
// userspace should either retry or thaw. While current
// kernel cgroup v1 docs no longer mention a need to retry,
// the kernel (tested on v5.4, Ubuntu 20.04) can't reliably
// freeze a cgroup while new processes keep appearing in it
// (either via fork/clone or by writing new PIDs to
// cgroup.procs).
//
// The numbers below are chosen to have a decent chance to
// succeed even in the worst case scenario (runc pause/unpause
// with parallel runc exec).
//
// Adding any amount of sleep in between retries did not
// increase the chances of successful freeze.
for i := 0; i < 1000; i++ {
if i%50 == 49 {
// Briefly thawing the cgroup also helps.
_ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
time.Sleep(10 * time.Millisecond)
}
if err := fscommon.WriteFile(path, "freezer.state", string(configs.Frozen)); err != nil {
return err
}
state, err := fscommon.ReadFile(path, "freezer.state")
if err != nil {
return err
}
state = strings.TrimSpace(state)
switch state {
case "FREEZING":
continue
case string(configs.Frozen):
if i > 1 {
logrus.Debugf("frozen after %d retries", i)
}
return nil
default:
// should never happen
return fmt.Errorf("unexpected state %s while freezing", strings.TrimSpace(state))
}
}
// Despite our best efforts, it got stuck in FREEZING.
return errors.New("unable to freeze")
case configs.Thawed:
return fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
case configs.Undefined:
return nil
default:
return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
}
}
func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
return nil
}
func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) {
for {
state, err := fscommon.ReadFile(path, "freezer.state")
if err != nil {
// If the kernel is too old, then we just treat the freezer as
// being in an "undefined" state.
if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) {
err = nil
}
return configs.Undefined, err
}
switch strings.TrimSpace(state) {
case "THAWED":
return configs.Thawed, nil
case "FROZEN":
return configs.Frozen, nil
case "FREEZING":
// Make sure we get a stable freezer state, so retry if the cgroup
// is still undergoing freezing. This should be a temporary delay.
time.Sleep(1 * time.Millisecond)
continue
default:
return configs.Undefined, fmt.Errorf("unknown freezer.state %q", state)
}
}
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package site
import (
"context"
"fmt"
"html/template"
"time"
"github.com/google/triage-party/pkg/hubbub"
"github.com/google/triage-party/pkg/triage"
"k8s.io/klog/v2"
)
const (
    // OpenStatsName is the name of the rule containing open items stats
    OpenStatsName = "__open__"
    // VelocityStatsName is the name of the rule containing velocity stats
    VelocityStatsName = "__velocity__"
)
// collectionPage renders the page for collection id: it resolves the
// collection, fetches (optionally force-refreshing) its cached results,
// and assembles the Page including staleness notices and stats lookups.
func (h *Handlers) collectionPage(ctx context.Context, id string, refresh bool) (*Page, error) {
    start := time.Now()
    defer func() {
        klog.Infof("Served %q request within %s", id, time.Since(start))
    }()
    s, err := h.party.LookupCollection(id)
    if err != nil {
        return nil, fmt.Errorf("lookup collection: %w", err)
    }
    sts, err := h.party.ListCollections()
    if err != nil {
        return nil, fmt.Errorf("list collections: %w", err)
    }
    var result *triage.CollectionResult
    if refresh {
        result = h.updater.ForceRefresh(ctx, id)
        klog.Infof("refresh %q result: %d items", id, len(result.RuleResults))
    } else {
        result = h.updater.Lookup(ctx, id, false)
        // Serve an empty result instead of failing when nothing is
        // cached yet; the notification below tells the user data is
        // still being gathered.
        if result == nil {
            klog.Errorf("lookup %q returned no data", id)
            result = &triage.CollectionResult{}
        } else if result.RuleResults == nil {
            klog.Errorf("lookup %q returned no results: %+v", id, result)
        }
    }
    // The page reports the deduplicated item count; a previous
    // cross-rule total was computed here but never used, so it was
    // removed as dead code.
    unique := uniqueItems(result.RuleResults)
    p := &Page{
        ID:               s.ID,
        Version:          VERSION,
        SiteName:         h.siteName,
        Title:            s.Name,
        Collection:       s,
        Collections:      sts,
        Description:      s.Description,
        CollectionResult: result,
        Total:            len(unique),
        Types:            "Issues",
        UniqueItems:      unique,
        ResultAge:        time.Since(result.OldestInput),
        Status:           h.updater.Status(),
    }
    if result.RuleResults == nil {
        p.Notification = template.HTML(fmt.Sprintf("Gathering data (%d issues examined) ...", h.party.ConversationsTotal()))
    } else if p.ResultAge > h.warnAge {
        p.Notification = template.HTML(fmt.Sprintf(`Refreshing data in the background. Displayed data may be up to %s old. Use <a href="https://en.wikipedia.org/wiki/Wikipedia:Bypass_your_cache#Bypassing_cache">Shift-Reload</a> to force a data refresh at any time.`, humanDuration(time.Since(result.OldestInput))))
        p.Stale = true
    }
    // Velocity stats: prefer a collection-specific source, otherwise
    // scan the stats-only collections for the well-known names.
    if result.Collection != nil && result.Collection.Velocity != "" {
        p.VelocityStats = h.updater.Lookup(ctx, result.Collection.Velocity, false)
    } else {
        for _, s := range sts {
            if s.UsedForStats {
                if s.ID == VelocityStatsName {
                    p.VelocityStats = h.updater.Lookup(ctx, s.ID, false)
                    continue
                }
                // Older configs may not use OpenStatsName
                if s.ID == OpenStatsName || p.OpenStats == nil {
                    p.OpenStats = h.updater.Lookup(ctx, s.ID, false)
                    continue
                }
            }
        }
    }
    return p, nil
}
// uniqueItems flattens rule results into a single conversation list,
// keeping only the first occurrence of each item URL.
func uniqueItems(results []*triage.RuleResult) []*hubbub.Conversation {
    seen := map[string]bool{}
    unique := []*hubbub.Conversation{}
    for _, result := range results {
        for _, item := range result.Items {
            if seen[item.URL] {
                continue
            }
            seen[item.URL] = true
            unique = append(unique, item)
        }
    }
    return unique
}
Improve gathering data message
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package site
import (
"context"
"fmt"
"html/template"
"time"
"github.com/google/triage-party/pkg/hubbub"
"github.com/google/triage-party/pkg/triage"
"k8s.io/klog/v2"
)
const (
    // OpenStatsName is the name of the rule containing open items stats
    OpenStatsName = "__open__"
    // VelocityStatsName is the name of the rule containing velocity stats
    VelocityStatsName = "__velocity__"
)
// collectionPage assembles the Page model for a collection view: it looks up
// the collection and its cached rule results (optionally forcing a refresh),
// deduplicates items across rules, and attaches velocity/open statistics.
func (h *Handlers) collectionPage(ctx context.Context, id string, refresh bool) (*Page, error) {
	start := time.Now()
	defer func() {
		klog.Infof("Served %q request within %s", id, time.Since(start))
	}()
	s, err := h.party.LookupCollection(id)
	if err != nil {
		return nil, fmt.Errorf("lookup collection: %w", err)
	}
	sts, err := h.party.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("list collections: %w", err)
	}
	var result *triage.CollectionResult
	if refresh {
		// Caller asked for fresh data: bypass the cache entirely.
		result = h.updater.ForceRefresh(ctx, id)
		klog.Infof("refresh %q result: %d items", id, len(result.RuleResults))
	} else {
		result = h.updater.Lookup(ctx, id, false)
		if result == nil {
			klog.Errorf("lookup %q returned no data", id)
			// Fall back to an empty result so the page still renders and the
			// "no cached data" notification below is shown.
			result = &triage.CollectionResult{}
		} else if result.RuleResults == nil {
			klog.Errorf("lookup %q returned no results: %+v", id, result)
		}
	}
	// NOTE(review): total counts items including duplicates, but it is never
	// read afterwards (Page.Total uses len(unique)) — candidate for removal.
	total := 0
	for _, o := range result.RuleResults {
		total += len(o.Items)
	}
	unique := uniqueItems(result.RuleResults)
	p := &Page{
		ID: s.ID,
		Version: VERSION,
		SiteName: h.siteName,
		Title: s.Name,
		Collection: s,
		Collections: sts,
		Description: s.Description,
		CollectionResult: result,
		Total: len(unique),
		Types: "Issues",
		UniqueItems: unique,
		ResultAge: time.Since(result.OldestInput),
		Status: h.updater.Status(),
	}
	if result.RuleResults == nil {
		// First run: no cached data yet, so show initial download progress.
		p.Notification = template.HTML(fmt.Sprintf("No cached data found - performing initial data download (%d issues examined) ...", h.party.ConversationsTotal()))
	} else if p.ResultAge > h.warnAge {
		// Data is older than the configured warning threshold: mark it stale.
		p.Notification = template.HTML(fmt.Sprintf(`Refreshing data in the background. Displayed data may be up to %s old. Use <a href="https://en.wikipedia.org/wiki/Wikipedia:Bypass_your_cache#Bypassing_cache">Shift-Reload</a> to force a data refresh at any time.`, humanDuration(time.Since(result.OldestInput))))
		p.Stale = true
	}
	// Prefer the collection's explicitly configured velocity rule; otherwise
	// fall back to scanning the stats collections by well-known name.
	if result.Collection != nil && result.Collection.Velocity != "" {
		p.VelocityStats = h.updater.Lookup(ctx, result.Collection.Velocity, false)
	} else {
		for _, s := range sts {
			if s.UsedForStats {
				if s.ID == VelocityStatsName {
					p.VelocityStats = h.updater.Lookup(ctx, s.ID, false)
					continue
				}
				// Older configs may not use OpenStatsName; take the first
				// stats collection as OpenStats if none has been set yet.
				if s.ID == OpenStatsName || p.OpenStats == nil {
					p.OpenStats = h.updater.Lookup(ctx, s.ID, false)
					continue
				}
			}
		}
	}
	return p, nil
}
// uniqueItems deduplicates conversations across all rule results by URL,
// preserving the order in which each conversation was first encountered.
func uniqueItems(results []*triage.RuleResult) []*hubbub.Conversation {
	seen := map[string]bool{}
	items := []*hubbub.Conversation{}
	for _, res := range results {
		for _, conv := range res.Items {
			if seen[conv.URL] {
				continue
			}
			seen[conv.URL] = true
			items = append(items, conv)
		}
	}
	return items
}
|
// Copyright 2018 the LinuxBoot Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uefi
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/linuxboot/fiano/pkg/guid"
)
// FVFileType represents the different types possible in an EFI file.
type FVFileType uint8
// UEFI FV File types.
const (
FVFileTypeAll FVFileType = iota
FVFileTypeRaw
FVFileTypeFreeForm
FVFileTypeSECCore
FVFileTypePEICore
FVFileTypeDXECore
FVFileTypePEIM
FVFileTypeDriver
FVFileTypeCombinedPEIMDriver
FVFileTypeApplication
FVFileTypeSMM
FVFileTypeVolumeImage
FVFileTypeCombinedSMMDXE
FVFileTypeSMMCore
FVFileTypeSMMStandalone
FVFileTypeSMMCoreStandalone
FVFileTypeOEMMin FVFileType = 0xC0
FVFileTypeOEMMax FVFileType = 0xDF
FVFileTypeDebugMin FVFileType = 0xE0
FVFileTypeDebugMax FVFileType = 0xEF
FVFileTypePad FVFileType = 0xF0
FVFileTypeFFSMin FVFileType = 0xF0
FVFileTypeFFSMax FVFileType = 0xFF
)
// SupportedFiles is a list of files types which will be parsed. File types not
// on this list are treated as opaque binary blobs.
var SupportedFiles = map[FVFileType]bool{
// These are the file types that we'll actually try to parse sections for.
FVFileTypeFreeForm: true,
FVFileTypeSECCore: true,
FVFileTypePEICore: true,
FVFileTypeDXECore: true,
// TODO: Commenting out this line prevents PEI modules from being
// decompressed. This solves the problem of PEI being too big when recompressed.
//FVFileTypePEIM: true,
FVFileTypeDriver: true,
FVFileTypeCombinedPEIMDriver: true,
FVFileTypeApplication: true,
FVFileTypeSMM: true,
FVFileTypeVolumeImage: true,
FVFileTypeCombinedSMMDXE: true,
FVFileTypeSMMCore: true,
FVFileTypeSMMStandalone: true,
FVFileTypeSMMCoreStandalone: true,
}
var fileTypeNames = map[FVFileType]string{
FVFileTypeRaw: "EFI_FV_FILETYPE_RAW",
FVFileTypeFreeForm: "EFI_FV_FILETYPE_FREEFORM",
FVFileTypeSECCore: "EFI_FV_FILETYPE_SECURITY_CORE",
FVFileTypePEICore: "EFI_FV_FILETYPE_PEI_CORE",
FVFileTypeDXECore: "EFI_FV_FILETYPE_DXE_CORE",
FVFileTypePEIM: "EFI_FV_FILETYPE_PEIM",
FVFileTypeDriver: "EFI_FV_FILETYPE_DRIVER",
FVFileTypeCombinedPEIMDriver: "EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER",
FVFileTypeApplication: "EFI_FV_FILETYPE_APPLICATION",
FVFileTypeSMM: "EFI_FV_FILETYPE_MM",
FVFileTypeVolumeImage: "EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE",
FVFileTypeCombinedSMMDXE: "EFI_FV_FILETYPE_COMBINED_MM_DXE",
FVFileTypeSMMCore: "EFI_FV_FILETYPE_MM_CORE",
FVFileTypeSMMStandalone: "EFI_FV_FILETYPE_MM_STANDALONE",
FVFileTypeSMMCoreStandalone: "EFI_FV_FILETYPE_MM_CORE_STANDALONE",
}
// String creates a string representation for the file type.
func (f FVFileType) String() string {
switch {
case FVFileTypeOEMMin <= f && f <= FVFileTypeOEMMax:
return fmt.Sprintf("EFI_FV_FILETYPE_OEM (%#x)", uint8(f))
case FVFileTypeDebugMin <= f && f <= FVFileTypeDebugMax:
return fmt.Sprintf("EFI_FV_FILETYPE_DEBUG (%#x)", uint8(f))
// We use the non-inclusive '<' operator here because pad files belong
// to the FFS filetype, but are also their own type.
case FVFileTypeFFSMin < f && f <= FVFileTypeFFSMax:
return fmt.Sprintf("EFI_FV_FILETYPE_FFS (%#x)", uint8(f))
case f == FVFileTypePad:
return "EFI_FV_FILETYPE_FFS_PAD"
}
if t, ok := fileTypeNames[f]; ok {
return t
}
return "UNKNOWN"
}
// Stock GUIDS
var (
ZeroGUID = guid.MustParse("00000000-0000-0000-0000-000000000000")
FFGUID = guid.MustParse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF")
)
// FileAlignments specifies the correct alignments based on the field in the file header.
var fileAlignments = []uint64{
// These alignments not computable, we have to look them up.
1,
16,
128,
512,
1024,
4 * 1024,
32 * 1024,
64 * 1024,
128 * 1024,
256 * 1024,
512 * 1024,
1024 * 1024,
2 * 1024 * 1024,
4 * 1024 * 1024,
8 * 1024 * 1024,
16 * 1024 * 1024,
}
const (
// FileHeaderMinLength is the minimum length of a firmware file header.
FileHeaderMinLength = 0x18
// FileHeaderExtMinLength is the minimum length of an extended firmware file header.
FileHeaderExtMinLength = 0x20
// EmptyBodyChecksum is the value placed in the File IntegrityCheck field if the body checksum bit isn't set.
EmptyBodyChecksum uint8 = 0xAA
)
// IntegrityCheck holds the two 8 bit checksums for the file header and body separately.
type IntegrityCheck struct {
Header uint8
File uint8
}
type fileAttr uint8
// FileHeader represents an EFI File header.
type FileHeader struct {
GUID guid.GUID // This is the GUID of the file.
Checksum IntegrityCheck `json:"-"`
Type FVFileType
Attributes fileAttr
Size [3]uint8 `json:"-"`
State uint8
}
// IsLarge checks if the large file attribute is set.
func (a fileAttr) IsLarge() bool {
return a&0x01 != 0
}
// GetAlignment returns the byte alignment specified by the file header.
// GetAlignment returns the byte alignment specified by the file header.
// Bits 3-5 hold the 3-bit alignment index; bit 1 is shifted up to act as a
// fourth (high) index bit, selecting the upper half of fileAlignments.
// NOTE(review): presumably this matches the PI spec's extended data-alignment
// bit — confirm against the UEFI PI specification.
func (a fileAttr) GetAlignment() uint64 {
	alignVal := (a & 0x38) >> 3
	alignVal |= (a & 0x02) << 2
	return fileAlignments[alignVal]
}
// Sets the large file attribute.
func (a *fileAttr) setLarge(large bool) {
if large {
*a |= 0x01
} else {
*a &= 0xFE
}
}
// HasChecksum checks if we need to checksum the file body.
func (a fileAttr) HasChecksum() bool {
return a&0x40 != 0
}
// HeaderLen returns the length of the file header depending on the file size.
func (f *File) HeaderLen() uint64 {
if f.Header.Attributes.IsLarge() {
return FileHeaderExtMinLength
}
return FileHeaderMinLength
}
// ChecksumHeader returns a checksum of the header.
func (f *File) ChecksumHeader() uint8 {
fh := f.Header
headerSize := FileHeaderMinLength
if fh.Attributes.IsLarge() {
headerSize = FileHeaderExtMinLength
}
// Sum over header without State and IntegrityCheck.File.
// To do that we just sum over the whole header and subtract.
// UEFI PI Spec 3.2.3 EFI_FFS_FILE_HEADER
sum := Checksum8(f.buf[:headerSize])
sum -= fh.Checksum.File
sum -= fh.State
return sum
}
// FileHeaderExtended represents an EFI File header with the
// large file attribute set.
// We also use this as the generic header for all EFI files, regardless of whether
// they are actually large. This makes it easier for us to just return one type
// All sizes are also copied into the ExtendedSize field so we only have to check once
type FileHeaderExtended struct {
FileHeader
ExtendedSize uint64 `json:"-"`
}
// File represents an EFI File.
type File struct {
Header FileHeaderExtended
Type string
// a File can contain either Sections or an NVarStore but not both
Sections []*Section `json:",omitempty"`
NVarStore *NVarStore `json:",omitempty"`
//Metadata for extraction and recovery
buf []byte
ExtractPath string
DataOffset uint64
}
// Buf returns the buffer.
// Used mostly for things interacting with the Firmware interface.
func (f *File) Buf() []byte {
return f.buf
}
// SetBuf sets the buffer.
// Used mostly for things interacting with the Firmware interface.
func (f *File) SetBuf(buf []byte) {
f.buf = buf
}
// Apply calls the visitor on the File.
func (f *File) Apply(v Visitor) error {
return v.Visit(f)
}
// ApplyChildren calls the visitor on each child node of File.
func (f *File) ApplyChildren(v Visitor) error {
if f.NVarStore != nil {
if err := f.NVarStore.Apply(v); err != nil {
return err
}
return nil
}
for _, s := range f.Sections {
if err := s.Apply(v); err != nil {
return err
}
}
return nil
}
// SetSize sets the size into the File struct.
// If resizeFile is true, if the file is too large the file will be enlarged to make space
// for the ExtendedHeader
func (f *File) SetSize(size uint64, resizeFile bool) {
fh := &f.Header
// See if we need the extended size
// Check if size > 3 bytes size field
fh.ExtendedSize = size
fh.Attributes.setLarge(false)
if fh.ExtendedSize > 0xFFFFFF {
// Can't fit, need extended header
if resizeFile {
// Increase the file size by the additional space needed
// for the extended header.
fh.ExtendedSize += FileHeaderExtMinLength - FileHeaderMinLength
}
fh.Attributes.setLarge(true)
}
// This will set size to 0xFFFFFF if too big.
fh.Size = Write3Size(fh.ExtendedSize)
}
// ChecksumAndAssemble takes in the fileData and assembles the file binary
func (f *File) ChecksumAndAssemble(fileData []byte) error {
// Checksum the header and body, then write out the header.
// To checksum the header we write the temporary header to the file buffer first.
fh := &f.Header
header := new(bytes.Buffer)
err := binary.Write(header, binary.LittleEndian, fh)
if err != nil {
return fmt.Errorf("unable to construct binary header of file %v, got %v",
fh.GUID, err)
}
f.buf = header.Bytes()
// We need to get rid of whatever it sums to so that the overall sum is zero
// Sorry about the name :(
fh.Checksum.Header -= f.ChecksumHeader()
// Checksum the body
fh.Checksum.File = EmptyBodyChecksum
if fh.Attributes.HasChecksum() {
// if the empty checksum had been set to 0 instead of 0xAA
// this could have been a bit nicer. BUT NOOOOOOO.
fh.Checksum.File = 0 - Checksum8(fileData)
}
// Write out the updated header to the buffer with the new checksums.
// Write the extended header only if the large attribute flag is set.
header = new(bytes.Buffer)
if fh.Attributes.IsLarge() {
err = binary.Write(header, binary.LittleEndian, fh)
} else {
err = binary.Write(header, binary.LittleEndian, fh.FileHeader)
}
if err != nil {
return err
}
f.buf = header.Bytes()
f.buf = append(f.buf, fileData...)
return nil
}
// CreatePadFile creates an empty pad file in order to align the next file.
func CreatePadFile(size uint64) (*File, error) {
if size < FileHeaderMinLength {
return nil, fmt.Errorf("size too small! min size required is %#x bytes, requested %#x",
FileHeaderMinLength, size)
}
f := File{}
fh := &f.Header
// Create empty guid
if Attributes.ErasePolarity == 0xFF {
fh.GUID = *FFGUID
} else if Attributes.ErasePolarity == 0 {
fh.GUID = *ZeroGUID
} else {
return nil, fmt.Errorf("erase polarity not 0x00 or 0xFF, got %#x", Attributes.ErasePolarity)
}
// TODO: I see examples of this where the attributes are just 0 and not dependent on the
// erase polarity. Is that right? Check and handle.
fh.Attributes = 0
// Set the size. If the file is too big, we take up more of the padding for the header.
// This also sets the large file attribute if file is big.
f.SetSize(size, false)
fh.Type = FVFileTypePad
f.Type = fh.Type.String()
// Create empty pad filedata based on size
var fileData []byte
fileData = make([]byte, size-FileHeaderMinLength)
if fh.Attributes.IsLarge() {
fileData = make([]byte, size-FileHeaderExtMinLength)
}
// Fill with empty bytes
for i, dataLen := 0, len(fileData); i < dataLen; i++ {
fileData[i] = Attributes.ErasePolarity
}
fh.State = 0x07 ^ Attributes.ErasePolarity
// Everything has been setup. Checksum and create.
if err := f.ChecksumAndAssemble(fileData); err != nil {
return nil, err
}
return &f, nil
}
// NewFile parses a sequence of bytes and returns a File
// object, if a valid one is passed, or an error. If no error is returned and the File
// pointer is nil, it means we've reached the volume free space at the end of the FV.
func NewFile(buf []byte) (*File, error) {
f := File{}
f.DataOffset = FileHeaderMinLength
// Read in standard header.
r := bytes.NewReader(buf)
if err := binary.Read(r, binary.LittleEndian, &f.Header.FileHeader); err != nil {
return nil, err
}
// Map type to string.
f.Type = f.Header.Type.String()
// TODO: Check Attribute flag as well. How important is the attribute flag? we already
// have FFFFFF in the size
if f.Header.Size == [3]uint8{0xFF, 0xFF, 0xFF} {
// Extended Header
if err := binary.Read(r, binary.LittleEndian, &f.Header.ExtendedSize); err != nil {
return nil, err
}
if f.Header.ExtendedSize == 0xFFFFFFFFFFFFFFFF {
// Start of free space
// Note: this is not a pad file. Pad files also have valid headers.
return nil, nil
}
f.DataOffset = FileHeaderExtMinLength
} else {
// Copy small size into big for easier handling.
// Damn the 3 byte sizes.
f.Header.ExtendedSize = Read3Size(f.Header.Size)
}
if buflen := len(buf); f.Header.ExtendedSize > uint64(buflen) {
return nil, fmt.Errorf("File size too big! File with GUID: %v has length %v, but is only %v bytes big",
f.Header.GUID, f.Header.ExtendedSize, buflen)
}
// Copy out the buffer.
newBuf := buf[:f.Header.ExtendedSize]
f.buf = make([]byte, f.Header.ExtendedSize)
copy(f.buf, newBuf)
// Special case for NVAR Store stored in raw file
if f.Header.Type == FVFileTypeRaw && f.Header.GUID == *NVAR {
ns, err := NewNVarStore(f.buf[f.DataOffset:])
if err != nil {
return nil, fmt.Errorf("error parsing NVAR store in file %v: %v", f.Header.GUID, err)
}
f.NVarStore = ns
}
// Parse sections
if _, ok := SupportedFiles[f.Header.Type]; !ok {
return &f, nil
}
for i, offset := 0, f.DataOffset; offset < f.Header.ExtendedSize; i++ {
s, err := NewSection(f.buf[offset:], i)
if err != nil {
return nil, fmt.Errorf("error parsing sections of file %v: %v", f.Header.GUID, err)
}
offset += uint64(s.Header.ExtendedSize)
// Align to 4 bytes for now. The PI Spec doesn't say what alignment it should be
// but UEFITool aligns to 4 bytes, and this seems to work on everything I have.
offset = Align4(offset)
f.Sections = append(f.Sections, s)
}
return &f, nil
}
Print a warning but do not fail if NVRAM cannot be parsed (#271)
It's OK if we can't parse NVRAM variables; just print a warning.
Signed-off-by: Ronald G. Minnich <f569d0af9a3481c4007b95c7180d6c41009f2513@gmail.com>
// Copyright 2018 the LinuxBoot Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uefi
import (
"bytes"
"encoding/binary"
"fmt"
"log"
"github.com/linuxboot/fiano/pkg/guid"
)
// FVFileType represents the different types possible in an EFI file.
type FVFileType uint8
// UEFI FV File types.
const (
FVFileTypeAll FVFileType = iota
FVFileTypeRaw
FVFileTypeFreeForm
FVFileTypeSECCore
FVFileTypePEICore
FVFileTypeDXECore
FVFileTypePEIM
FVFileTypeDriver
FVFileTypeCombinedPEIMDriver
FVFileTypeApplication
FVFileTypeSMM
FVFileTypeVolumeImage
FVFileTypeCombinedSMMDXE
FVFileTypeSMMCore
FVFileTypeSMMStandalone
FVFileTypeSMMCoreStandalone
FVFileTypeOEMMin FVFileType = 0xC0
FVFileTypeOEMMax FVFileType = 0xDF
FVFileTypeDebugMin FVFileType = 0xE0
FVFileTypeDebugMax FVFileType = 0xEF
FVFileTypePad FVFileType = 0xF0
FVFileTypeFFSMin FVFileType = 0xF0
FVFileTypeFFSMax FVFileType = 0xFF
)
// SupportedFiles is a list of files types which will be parsed. File types not
// on this list are treated as opaque binary blobs.
var SupportedFiles = map[FVFileType]bool{
// These are the file types that we'll actually try to parse sections for.
FVFileTypeFreeForm: true,
FVFileTypeSECCore: true,
FVFileTypePEICore: true,
FVFileTypeDXECore: true,
// TODO: Commenting out this line prevents PEI modules from being
// decompressed. This solves the problem of PEI being too big when recompressed.
//FVFileTypePEIM: true,
FVFileTypeDriver: true,
FVFileTypeCombinedPEIMDriver: true,
FVFileTypeApplication: true,
FVFileTypeSMM: true,
FVFileTypeVolumeImage: true,
FVFileTypeCombinedSMMDXE: true,
FVFileTypeSMMCore: true,
FVFileTypeSMMStandalone: true,
FVFileTypeSMMCoreStandalone: true,
}
var fileTypeNames = map[FVFileType]string{
FVFileTypeRaw: "EFI_FV_FILETYPE_RAW",
FVFileTypeFreeForm: "EFI_FV_FILETYPE_FREEFORM",
FVFileTypeSECCore: "EFI_FV_FILETYPE_SECURITY_CORE",
FVFileTypePEICore: "EFI_FV_FILETYPE_PEI_CORE",
FVFileTypeDXECore: "EFI_FV_FILETYPE_DXE_CORE",
FVFileTypePEIM: "EFI_FV_FILETYPE_PEIM",
FVFileTypeDriver: "EFI_FV_FILETYPE_DRIVER",
FVFileTypeCombinedPEIMDriver: "EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER",
FVFileTypeApplication: "EFI_FV_FILETYPE_APPLICATION",
FVFileTypeSMM: "EFI_FV_FILETYPE_MM",
FVFileTypeVolumeImage: "EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE",
FVFileTypeCombinedSMMDXE: "EFI_FV_FILETYPE_COMBINED_MM_DXE",
FVFileTypeSMMCore: "EFI_FV_FILETYPE_MM_CORE",
FVFileTypeSMMStandalone: "EFI_FV_FILETYPE_MM_STANDALONE",
FVFileTypeSMMCoreStandalone: "EFI_FV_FILETYPE_MM_CORE_STANDALONE",
}
// String creates a string representation for the file type.
func (f FVFileType) String() string {
switch {
case FVFileTypeOEMMin <= f && f <= FVFileTypeOEMMax:
return fmt.Sprintf("EFI_FV_FILETYPE_OEM (%#x)", uint8(f))
case FVFileTypeDebugMin <= f && f <= FVFileTypeDebugMax:
return fmt.Sprintf("EFI_FV_FILETYPE_DEBUG (%#x)", uint8(f))
// We use the non-inclusive '<' operator here because pad files belong
// to the FFS filetype, but are also their own type.
case FVFileTypeFFSMin < f && f <= FVFileTypeFFSMax:
return fmt.Sprintf("EFI_FV_FILETYPE_FFS (%#x)", uint8(f))
case f == FVFileTypePad:
return "EFI_FV_FILETYPE_FFS_PAD"
}
if t, ok := fileTypeNames[f]; ok {
return t
}
return "UNKNOWN"
}
// Stock GUIDS
var (
ZeroGUID = guid.MustParse("00000000-0000-0000-0000-000000000000")
FFGUID = guid.MustParse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF")
)
// FileAlignments specifies the correct alignments based on the field in the file header.
var fileAlignments = []uint64{
// These alignments not computable, we have to look them up.
1,
16,
128,
512,
1024,
4 * 1024,
32 * 1024,
64 * 1024,
128 * 1024,
256 * 1024,
512 * 1024,
1024 * 1024,
2 * 1024 * 1024,
4 * 1024 * 1024,
8 * 1024 * 1024,
16 * 1024 * 1024,
}
const (
// FileHeaderMinLength is the minimum length of a firmware file header.
FileHeaderMinLength = 0x18
// FileHeaderExtMinLength is the minimum length of an extended firmware file header.
FileHeaderExtMinLength = 0x20
// EmptyBodyChecksum is the value placed in the File IntegrityCheck field if the body checksum bit isn't set.
EmptyBodyChecksum uint8 = 0xAA
)
// IntegrityCheck holds the two 8 bit checksums for the file header and body separately.
type IntegrityCheck struct {
Header uint8
File uint8
}
type fileAttr uint8
// FileHeader represents an EFI File header.
type FileHeader struct {
GUID guid.GUID // This is the GUID of the file.
Checksum IntegrityCheck `json:"-"`
Type FVFileType
Attributes fileAttr
Size [3]uint8 `json:"-"`
State uint8
}
// IsLarge checks if the large file attribute is set.
func (a fileAttr) IsLarge() bool {
return a&0x01 != 0
}
// GetAlignment returns the byte alignment specified by the file header.
func (a fileAttr) GetAlignment() uint64 {
alignVal := (a & 0x38) >> 3
alignVal |= (a & 0x02) << 2
return fileAlignments[alignVal]
}
// Sets the large file attribute.
func (a *fileAttr) setLarge(large bool) {
if large {
*a |= 0x01
} else {
*a &= 0xFE
}
}
// HasChecksum checks if we need to checksum the file body.
func (a fileAttr) HasChecksum() bool {
return a&0x40 != 0
}
// HeaderLen returns the length of the file header depending on the file size.
func (f *File) HeaderLen() uint64 {
if f.Header.Attributes.IsLarge() {
return FileHeaderExtMinLength
}
return FileHeaderMinLength
}
// ChecksumHeader returns a checksum of the header.
func (f *File) ChecksumHeader() uint8 {
fh := f.Header
headerSize := FileHeaderMinLength
if fh.Attributes.IsLarge() {
headerSize = FileHeaderExtMinLength
}
// Sum over header without State and IntegrityCheck.File.
// To do that we just sum over the whole header and subtract.
// UEFI PI Spec 3.2.3 EFI_FFS_FILE_HEADER
sum := Checksum8(f.buf[:headerSize])
sum -= fh.Checksum.File
sum -= fh.State
return sum
}
// FileHeaderExtended represents an EFI File header with the
// large file attribute set.
// We also use this as the generic header for all EFI files, regardless of whether
// they are actually large. This makes it easier for us to just return one type
// All sizes are also copied into the ExtendedSize field so we only have to check once
type FileHeaderExtended struct {
FileHeader
ExtendedSize uint64 `json:"-"`
}
// File represents an EFI File.
type File struct {
Header FileHeaderExtended
Type string
// a File can contain either Sections or an NVarStore but not both
Sections []*Section `json:",omitempty"`
NVarStore *NVarStore `json:",omitempty"`
//Metadata for extraction and recovery
buf []byte
ExtractPath string
DataOffset uint64
}
// Buf returns the buffer.
// Used mostly for things interacting with the Firmware interface.
func (f *File) Buf() []byte {
return f.buf
}
// SetBuf sets the buffer.
// Used mostly for things interacting with the Firmware interface.
func (f *File) SetBuf(buf []byte) {
f.buf = buf
}
// Apply calls the visitor on the File.
func (f *File) Apply(v Visitor) error {
return v.Visit(f)
}
// ApplyChildren calls the visitor on each child node of File.
func (f *File) ApplyChildren(v Visitor) error {
if f.NVarStore != nil {
if err := f.NVarStore.Apply(v); err != nil {
return err
}
return nil
}
for _, s := range f.Sections {
if err := s.Apply(v); err != nil {
return err
}
}
return nil
}
// SetSize sets the size into the File struct.
// If resizeFile is true, if the file is too large the file will be enlarged to make space
// for the ExtendedHeader
func (f *File) SetSize(size uint64, resizeFile bool) {
fh := &f.Header
// See if we need the extended size
// Check if size > 3 bytes size field
fh.ExtendedSize = size
fh.Attributes.setLarge(false)
if fh.ExtendedSize > 0xFFFFFF {
// Can't fit, need extended header
if resizeFile {
// Increase the file size by the additional space needed
// for the extended header.
fh.ExtendedSize += FileHeaderExtMinLength - FileHeaderMinLength
}
fh.Attributes.setLarge(true)
}
// This will set size to 0xFFFFFF if too big.
fh.Size = Write3Size(fh.ExtendedSize)
}
// ChecksumAndAssemble takes in the fileData and assembles the file binary
func (f *File) ChecksumAndAssemble(fileData []byte) error {
// Checksum the header and body, then write out the header.
// To checksum the header we write the temporary header to the file buffer first.
fh := &f.Header
header := new(bytes.Buffer)
err := binary.Write(header, binary.LittleEndian, fh)
if err != nil {
return fmt.Errorf("unable to construct binary header of file %v, got %v",
fh.GUID, err)
}
f.buf = header.Bytes()
// We need to get rid of whatever it sums to so that the overall sum is zero
// Sorry about the name :(
fh.Checksum.Header -= f.ChecksumHeader()
// Checksum the body
fh.Checksum.File = EmptyBodyChecksum
if fh.Attributes.HasChecksum() {
// if the empty checksum had been set to 0 instead of 0xAA
// this could have been a bit nicer. BUT NOOOOOOO.
fh.Checksum.File = 0 - Checksum8(fileData)
}
// Write out the updated header to the buffer with the new checksums.
// Write the extended header only if the large attribute flag is set.
header = new(bytes.Buffer)
if fh.Attributes.IsLarge() {
err = binary.Write(header, binary.LittleEndian, fh)
} else {
err = binary.Write(header, binary.LittleEndian, fh.FileHeader)
}
if err != nil {
return err
}
f.buf = header.Bytes()
f.buf = append(f.buf, fileData...)
return nil
}
// CreatePadFile creates an empty pad file in order to align the next file.
func CreatePadFile(size uint64) (*File, error) {
if size < FileHeaderMinLength {
return nil, fmt.Errorf("size too small! min size required is %#x bytes, requested %#x",
FileHeaderMinLength, size)
}
f := File{}
fh := &f.Header
// Create empty guid
if Attributes.ErasePolarity == 0xFF {
fh.GUID = *FFGUID
} else if Attributes.ErasePolarity == 0 {
fh.GUID = *ZeroGUID
} else {
return nil, fmt.Errorf("erase polarity not 0x00 or 0xFF, got %#x", Attributes.ErasePolarity)
}
// TODO: I see examples of this where the attributes are just 0 and not dependent on the
// erase polarity. Is that right? Check and handle.
fh.Attributes = 0
// Set the size. If the file is too big, we take up more of the padding for the header.
// This also sets the large file attribute if file is big.
f.SetSize(size, false)
fh.Type = FVFileTypePad
f.Type = fh.Type.String()
// Create empty pad filedata based on size
var fileData []byte
fileData = make([]byte, size-FileHeaderMinLength)
if fh.Attributes.IsLarge() {
fileData = make([]byte, size-FileHeaderExtMinLength)
}
// Fill with empty bytes
for i, dataLen := 0, len(fileData); i < dataLen; i++ {
fileData[i] = Attributes.ErasePolarity
}
fh.State = 0x07 ^ Attributes.ErasePolarity
// Everything has been setup. Checksum and create.
if err := f.ChecksumAndAssemble(fileData); err != nil {
return nil, err
}
return &f, nil
}
// NewFile parses a sequence of bytes and returns a File
// object, if a valid one is passed, or an error. If no error is returned and the File
// pointer is nil, it means we've reached the volume free space at the end of the FV.
// NewFile parses a sequence of bytes and returns a File
// object, if a valid one is passed, or an error. If no error is returned and the File
// pointer is nil, it means we've reached the volume free space at the end of the FV.
func NewFile(buf []byte) (*File, error) {
	f := File{}
	f.DataOffset = FileHeaderMinLength
	// Read in standard header.
	r := bytes.NewReader(buf)
	if err := binary.Read(r, binary.LittleEndian, &f.Header.FileHeader); err != nil {
		return nil, err
	}
	// Map type to string.
	f.Type = f.Header.Type.String()
	// TODO: Check Attribute flag as well. How important is the attribute flag? we already
	// have FFFFFF in the size
	if f.Header.Size == [3]uint8{0xFF, 0xFF, 0xFF} {
		// A 3-byte size of 0xFFFFFF signals an extended (large-file) header;
		// the real 64-bit size follows immediately after the standard header.
		// Extended Header
		if err := binary.Read(r, binary.LittleEndian, &f.Header.ExtendedSize); err != nil {
			return nil, err
		}
		if f.Header.ExtendedSize == 0xFFFFFFFFFFFFFFFF {
			// Start of free space
			// Note: this is not a pad file. Pad files also have valid headers.
			return nil, nil
		}
		f.DataOffset = FileHeaderExtMinLength
	} else {
		// Copy small size into big for easier handling.
		// Damn the 3 byte sizes.
		f.Header.ExtendedSize = Read3Size(f.Header.Size)
	}
	if buflen := len(buf); f.Header.ExtendedSize > uint64(buflen) {
		return nil, fmt.Errorf("File size too big! File with GUID: %v has length %v, but is only %v bytes big",
			f.Header.GUID, f.Header.ExtendedSize, buflen)
	}
	// Copy out the buffer.
	newBuf := buf[:f.Header.ExtendedSize]
	f.buf = make([]byte, f.Header.ExtendedSize)
	copy(f.buf, newBuf)
	// Special case for NVAR Store stored in raw file
	if f.Header.Type == FVFileTypeRaw && f.Header.GUID == *NVAR {
		ns, err := NewNVarStore(f.buf[f.DataOffset:])
		if err != nil {
			// Best-effort: an unparseable NVAR store is logged rather than
			// fatal, so the rest of the image can still be processed (#271).
			log.Printf("error parsing NVAR store in file %v: %v", f.Header.GUID, err)
		}
		// Note that ns is nil if there was an error, so this assign is fine either way.
		f.NVarStore = ns
	}
	// Parse sections
	if _, ok := SupportedFiles[f.Header.Type]; !ok {
		// Unsupported file types are kept as opaque blobs: no section parsing.
		return &f, nil
	}
	for i, offset := 0, f.DataOffset; offset < f.Header.ExtendedSize; i++ {
		s, err := NewSection(f.buf[offset:], i)
		if err != nil {
			return nil, fmt.Errorf("error parsing sections of file %v: %v", f.Header.GUID, err)
		}
		offset += uint64(s.Header.ExtendedSize)
		// Align to 4 bytes for now. The PI Spec doesn't say what alignment it should be
		// but UEFITool aligns to 4 bytes, and this seems to work on everything I have.
		offset = Align4(offset)
		f.Sections = append(f.Sections, s)
	}
	return &f, nil
}
|
package util
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"path"
"strings"
"time"
)
var random *rand.Rand
func FetchURL(url string) ([]byte, error) {
if strings.HasPrefix(strings.ToLower(url), "http://") ||
strings.HasPrefix(strings.ToLower(url), "https://") {
c := &http.Client{}
resp, err := c.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return data, nil
}
return ioutil.ReadFile(url)
}
// JoinURL appends secondURL to the path component of baseURL. When baseURL
// parses as a URL, its scheme and host are preserved; otherwise the two are
// joined as plain filesystem-style paths.
func JoinURL(baseURL string, secondURL string) string {
	parsed, err := url.Parse(baseURL)
	if err != nil {
		// Not a parseable URL: fall back to a plain path join.
		return path.Join(baseURL, secondURL)
	}
	parsed.Path = path.Join(parsed.Path, secondURL)
	return parsed.String()
}
// GetMapChild walks nested string-keyed maps along keys and returns the value
// at the end of the chain. It returns an error when a key is missing or an
// intermediate value is not itself a map. With no keys it returns (nil, nil).
func GetMapChild(keys []string, m map[string]interface{}) (interface{}, error) {
	var value interface{}
	current := m
	for i, key := range keys {
		if i > 0 {
			// Descend: the previous value must itself be a map.
			next, ok := value.(map[string]interface{})
			if !ok {
				return nil, errors.New("key not found " + key)
			}
			current = next
		}
		v, found := current[key]
		if !found {
			return nil, errors.New("key not found " + key)
		}
		value = v
	}
	return value, nil
}
// MergeStringMaps returns a new map containing every entry of a and b.
// Entries in b take precedence over entries in a with the same key.
// Neither input is modified; nil inputs are treated as empty maps.
func MergeStringMaps(a map[string]string, b map[string]string) map[string]string {
	// Ranging over a nil map is a no-op, so the original's nil check and
	// throwaway allocation for a were redundant and have been removed.
	out := make(map[string]string, len(a)+len(b))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		out[k] = v
	}
	return out
}
// GenerateRandomString returns a string of the given length where each
// character is drawn at random from chars.
// NOTE(review): the lazy initialization of the package-level `random` is an
// unsynchronized check-then-write; concurrent first calls race on it.
// Confirm callers are single-threaded or guard the init with sync.Once.
func GenerateRandomString(chars string, length int) string {
	if random == nil {
		random = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	randStr := make([]byte, length)
	charsLen := len(chars)
	for i := 0; i < length; i++ {
		randStr[i] = chars[random.Intn(charsLen)]
	}
	return string(randStr)
}
// GenerateRandomGroup returns a random 8-character group name: one lowercase
// letter followed by seven lowercase alphanumeric characters.
func GenerateRandomGroup() string {
	const letters = "abcdefghijklmnopqrstuvwxyz"
	const alphanum = "abcdefghijklmnopqrstuvwxyz1234567890"
	return GenerateRandomString(letters, 1) + GenerateRandomString(alphanum, 7)
}
// StringToBool parses common truthy/falsy spellings of a boolean,
// case-insensitively. True: "true", "yes", "y", "ok", "t", "1".
// False: "false", "n", "no", "f", "0". Anything else is an error.
func StringToBool(s string) (bool, error) {
	lower := strings.ToLower(s)
	switch lower {
	case "true", "yes", "y", "ok", "t", "1":
		return true, nil
	case "false", "n", "no", "f", "0":
		return false, nil
	}
	// Error message reports the lowercased input, matching prior behavior.
	return false, fmt.Errorf("can't parse \"%s\" as boolean", lower)
}
// Relative returns true when p is a relative path
// returns false otherwise: absolute path, http(s)://url, or an address
// with github.com prefix
// Relative reports whether p should be treated as a local path rather than a
// remote address: it returns true for empty strings and dot-prefixed paths,
// and false only for http:// / https:// URLs and github.com-prefixed addresses.
// NOTE(review): absolute paths (e.g. "/etc/x") return TRUE here, since they
// match none of the excluded prefixes — the original comment claimed they
// return false. Confirm which behavior callers actually rely on.
func Relative(p string) bool {
	return len(p) == 0 || p[0] == '.' ||
		(!strings.HasPrefix(p, "github.com") &&
			!strings.HasPrefix(p, "http://") && !strings.HasPrefix(p, "https://"))
}
Add support for repos hosted on GitHub
package util
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"path"
"strings"
"time"
)
var random *rand.Rand
// githubize transforms the user/repo github address to a valid url
func githubize(url string) (string, error) {
splitOnSlash := strings.Split(url, "/")
if len(splitOnSlash) < 3 {
return "", errors.New("invalid github address")
}
splitOnSlash = splitOnSlash[1:] // strip out "github.com""
return fmt.Sprintf("%s%s",
"https://",
path.Join("raw.githubusercontent.com",
splitOnSlash[0],
splitOnSlash[1],
"master",
path.Join(splitOnSlash[2:]...))), nil
}
// FetchURL returns the contents at url. HTTP(S) URLs are fetched over the
// network, "github.com/..." addresses are rewritten to their raw-content URL
// and fetched, and anything else is treated as a local file path.
func FetchURL(url string) ([]byte, error) {
	if strings.HasPrefix(strings.ToLower(url), "http://") ||
		strings.HasPrefix(strings.ToLower(url), "https://") {
		// Bound the request so a stalled server cannot hang the caller
		// forever (the zero-value Client has no timeout at all).
		c := &http.Client{Timeout: 30 * time.Second}
		resp, err := c.Get(url)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}
		return data, nil
	} else if strings.HasPrefix(url, "github.com") {
		// BUG FIX: the previous version declared a shadowed `err` inside the
		// if-statement initializer, so a githubize failure returned
		// ([]byte{}, nil) — silently swallowing the error.
		githubizedURL, err := githubize(url)
		if err != nil {
			return nil, err
		}
		return FetchURL(githubizedURL)
	}
	return ioutil.ReadFile(url)
}
// JoinURL appends secondURL to baseURL's path component. When baseURL
// parses as a URL, only its path part is extended (scheme/host are
// preserved); when parsing fails the two are joined as plain
// slash-separated paths.
func JoinURL(baseURL string, secondURL string) string {
	parsed, err := url.Parse(baseURL)
	if err != nil {
		return path.Join(baseURL, secondURL)
	}
	parsed.Path = path.Join(parsed.Path, secondURL)
	return parsed.String()
}
// GetMapChild walks the nested string-keyed maps in m along keys and
// returns the value found at the end of the chain. It returns an
// error naming the first key that is absent or whose parent is not a
// map[string]interface{}. With no keys it returns (nil, nil).
func GetMapChild(keys []string, m map[string]interface{}) (interface{}, error) {
	current := m
	var value interface{}
	for idx, key := range keys {
		if idx > 0 {
			// Descend into the value found for the previous key.
			next, isMap := value.(map[string]interface{})
			if !isMap {
				return nil, errors.New("key not found " + key)
			}
			current = next
		}
		var found bool
		value, found = current[key]
		if !found {
			return nil, errors.New("key not found " + key)
		}
	}
	return value, nil
}
// MergeStringMaps returns a new map holding every entry of a overlaid
// with every entry of b; on key collisions the value from b wins.
// Either argument may be nil; the inputs are never modified and the
// result is always non-nil.
func MergeStringMaps(a map[string]string, b map[string]string) map[string]string {
	// Ranging over a nil map is a no-op in Go, so the previous
	// `if a == nil { a = make(...) }` guard was redundant and is removed.
	out := make(map[string]string, len(a)+len(b))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		out[k] = v
	}
	return out
}
// GenerateRandomString returns a string of the given length whose
// characters are drawn (via math/rand) from chars.
// NOTE(review): the lazy seeding of the package-level PRNG below is
// not goroutine-safe — confirm concurrent use is not expected.
// NOTE(review): rand.Intn panics if chars is empty.
func GenerateRandomString(chars string, length int) string {
	// Seed the shared PRNG on first use.
	if random == nil {
		random = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	randStr := make([]byte, length)
	charsLen := len(chars)
	for i := 0; i < length; i++ {
		randStr[i] = chars[random.Intn(charsLen)]
	}
	return string(randStr)
}
// GenerateRandomGroup produces a random 8-character group identifier:
// a lowercase letter followed by seven lowercase alphanumerics.
func GenerateRandomGroup() string {
	first := GenerateRandomString("abcdefghijklmnopqrstuvwxyz", 1)
	rest := GenerateRandomString("abcdefghijklmnopqrstuvwxyz1234567890", 7)
	return first + rest
}
// StringToBool parses s (case-insensitively) as a boolean.
// True: "true", "yes", "y", "ok", "t", "1".
// False: "false", "no", "n", "f", "0".
// Anything else is reported as an error.
func StringToBool(s string) (bool, error) {
	s = strings.ToLower(s)
	switch s {
	case "true", "yes", "y", "ok", "t", "1":
		return true, nil
	case "false", "n", "no", "f", "0":
		return false, nil
	}
	return false, fmt.Errorf("can't parse \"%s\" as boolean", s)
}
// Relative reports whether p should be treated as a relative path:
// empty, starting with '.', or lacking a "github.com", "http://", or
// "https://" prefix.
//
// NOTE(review): an absolute filesystem path ("/etc/x") also returns
// true here, contrary to the original comment's claim — only URL and
// github.com prefixes are excluded. Verify caller expectations.
func Relative(p string) bool {
	return len(p) == 0 || p[0] == '.' ||
		(!strings.HasPrefix(p, "github.com") &&
			!strings.HasPrefix(p, "http://") && !strings.HasPrefix(p, "https://"))
}
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type along with support code.
//
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
// Version string
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the escaped form be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe escaped form that
// leaves most paths unaltered.
//
// The safe escaped form is to replace every uppercase letter
// with an exclamation mark followed by the letter's lowercase equivalent.
//
// For example,
//
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
// Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
// Although paths are currently disallowed from using Unicode,
// we would like at some point to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention for escaping them in the file system.
// But there are at least two subtle considerations.
//
// First, note that not all case-fold equivalent distinct runes
// form an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are three distinct runes that case-fold to each other.
// When we do add Unicode letters, we must not assume that upper/lower
// are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
//
// Second, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
package module
// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.
import (
"fmt"
"path"
"sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/mod/semver"
errors "golang.org/x/xerrors"
)
// A Version (for clients, a module.Version) pairs a module path with
// a version string. Both are stored in their plain (unescaped) form.
type Version struct {
	// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
	Path string

	// Version is usually a semantic version in canonical form.
	// There are three exceptions to this general rule.
	// First, the top-level target of a build has no specific version
	// and uses Version = "".
	// Second, during MVS calculations the version "none" is used
	// to represent the decision to take no version of a given module.
	// Third, filesystem paths found in "replace" directives are
	// represented by a path with an empty version.
	Version string `json:",omitempty"`
}

// String renders the Version for logging as "Path@Version", or just
// Path when Version is empty.
func (m Version) String() string {
	if m.Version != "" {
		return m.Path + "@" + m.Version
	}
	return m.Path
}
// A ModuleError indicates an error specific to a module.
type ModuleError struct {
	Path    string // module path the error applies to
	Version string // version the error applies to; may be empty
	Err     error  // underlying error
}

// VersionError returns a ModuleError derived from a Version and error,
// or err itself if it is already such an error (for the same
// path/version), avoiding double wrapping.
func VersionError(v Version, err error) error {
	var mErr *ModuleError
	if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
		return err
	}
	return &ModuleError{
		Path:    v.Path,
		Version: v.Version,
		Err:     err,
	}
}

// Error formats the module error. An InvalidVersionError wrapped
// directly in Err gets special treatment so the version reported is
// the invalid one, not e.Version.
func (e *ModuleError) Error() string {
	if v, ok := e.Err.(*InvalidVersionError); ok {
		return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
	}
	if e.Version != "" {
		return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
	}
	return fmt.Sprintf("module %s: %v", e.Path, e.Err)
}

// Unwrap returns the underlying error for errors.Is/As chains.
func (e *ModuleError) Unwrap() error { return e.Err }

// An InvalidVersionError indicates an error specific to a version, with the
// module path unknown or specified externally.
//
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
// must not wrap a ModuleError.
type InvalidVersionError struct {
	Version string // the offending version string
	Pseudo  bool   // whether Version is a pseudo-version
	Err     error  // underlying error
}

// noun returns either "version" or "pseudo-version", depending on whether
// e.Version is a pseudo-version.
func (e *InvalidVersionError) noun() string {
	if e.Pseudo {
		return "pseudo-version"
	}
	return "version"
}

// Error formats the invalid-version message, e.g. `version "x" invalid: ...`.
func (e *InvalidVersionError) Error() string {
	return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
}

// Unwrap returns the underlying error for errors.Is/As chains.
func (e *InvalidVersionError) Unwrap() error { return e.Err }
// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
// Errors are reported as *ModuleError (possibly wrapping an
// *InvalidVersionError).
func Check(path, version string) error {
	if err := CheckPath(path); err != nil {
		return err
	}
	if !semver.IsValid(version) {
		return &ModuleError{
			Path: path,
			Err:  &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
		}
	}
	// The /vN (or gopkg.in .vN) suffix of the path must agree with the
	// major version of the semantic version.
	_, pathMajor, _ := SplitPathVersion(path)
	if err := CheckPathMajor(version, pathMajor); err != nil {
		return &ModuleError{Path: path, Err: err}
	}
	return nil
}
// firstPathOK reports whether r may appear in the first element of a
// module path. That element must be an LDH domain name, entirely
// lower case to avoid case ambiguity: lowercase ASCII letters, ASCII
// digits, '-' and '.' only.
func firstPathOK(r rune) bool {
	switch {
	case r == '-' || r == '.':
		return true
	case '0' <= r && r <= '9':
		return true
	case 'a' <= r && r <= 'z':
		return true
	}
	return false
}
// modPathOK reports whether r can appear in a module path element.
// Allowed: ASCII letters, ASCII digits, and the punctuation - . _ ~.
//
// This matches what "go get" has historically recognized in import
// paths, and avoids confusing sequences like '%20' or '+' that would
// change meaning if used in a URL. All non-ASCII runes are rejected;
// see the "escaped paths" discussion in the package documentation for
// why Unicode letters are deferred.
func modPathOK(r rune) bool {
	if r >= utf8.RuneSelf {
		return false
	}
	switch r {
	case '-', '.', '_', '~':
		return true
	}
	return ('0' <= r && r <= '9') ||
		('A' <= r && r <= 'Z') ||
		('a' <= r && r <= 'z')
}
// importPathOK reports whether r can appear in a package import path element.
//
// Import paths are intermediate between module paths and file paths: we
// disallow characters that would be confusing or ambiguous as arguments to
// 'go get' (such as '@' and ' ' ), but allow certain characters that are
// otherwise-unambiguous on the command line and historically used for some
// binary names (such as '++' as a suffix for compiler binaries and wrappers).
func importPathOK(r rune) bool {
	return modPathOK(r) || r == '+'
}
// fileNameOK reports whether r can appear in a file name.
// Accepted: any Unicode letter, ASCII digits, the space (U+0020), and
// the ASCII punctuation "!#$%&()+,-.=@[]^_{}~". Shell-special
// characters (" ' * < > ? ` |) and the path separators / : \ are
// rejected, and Windows/macOS forbid several of these too, so any
// widening of this set must be done carefully — see the "escaped
// paths" notes in the package documentation.
func fileNameOK(r rune) bool {
	if r >= utf8.RuneSelf {
		// Non-ASCII: currently any Unicode letter is acceptable.
		// Adding more would require handling case-folding and
		// normalization collisions.
		return unicode.IsLetter(r)
	}
	if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
		return true
	}
	// ASCII punctuation whitelist (note the trailing space character).
	return strings.ContainsRune("!#$%&()+,-.=@[]^_{}~ ", r)
}
// CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by CheckImportPath,
// with three additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
// ASCII digits, dots (U+002E), and dashes (U+002D);
// it must contain at least one dot and cannot start with a dash.
// Second, for a final path element of the form /vN, where N looks numeric
// (ASCII digits and dots) must not begin with a leading zero, must not be /v1,
// and must not contain any dots. For paths beginning with "gopkg.in/",
// this second requirement is replaced by a requirement that the path
// follow the gopkg.in server's conventions.
// Third, no path element may begin with a dot.
func CheckPath(path string) error {
	// General element-level validation shared with import paths.
	if err := checkPath(path, modulePath); err != nil {
		return fmt.Errorf("malformed module path %q: %v", path, err)
	}
	// Isolate the leading (domain-name) element.
	i := strings.Index(path, "/")
	if i < 0 {
		i = len(path)
	}
	if i == 0 {
		return fmt.Errorf("malformed module path %q: leading slash", path)
	}
	if !strings.Contains(path[:i], ".") {
		return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
	}
	if path[0] == '-' {
		return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
	}
	for _, r := range path[:i] {
		if !firstPathOK(r) {
			return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
		}
	}
	// Validate the trailing /vN (or gopkg.in .vN) major-version suffix.
	if _, _, ok := SplitPathVersion(path); !ok {
		return fmt.Errorf("malformed module path %q: invalid version", path)
	}
	return nil
}
// CheckImportPath checks that an import path is valid.
//
// A valid import path consists of one or more valid path elements
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
//
// A valid path element is a non-empty string made up of
// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
// It must not end with a dot (U+002E), nor contain two dots in a row.
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on). The element
// must not have a suffix of a tilde followed by one or more ASCII digits
// (to exclude paths elements that look like Windows short-names).
//
// CheckImportPath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckImportPath(path string) error {
	// Delegate to the shared validator with import-path rules.
	if err := checkPath(path, importPath); err != nil {
		return fmt.Errorf("malformed import path %q: %v", path, err)
	}
	return nil
}
// pathKind indicates what kind of path we're checking. Module paths,
// import paths, and file paths have different restrictions.
type pathKind int

const (
	// modulePath applies the strictest rules (see CheckPath).
	modulePath pathKind = iota
	// importPath additionally permits '+' (see CheckImportPath).
	importPath
	// filePath permits the widest character set (see CheckFilePath).
	filePath
)
// checkPath checks that a general path is valid.
// It returns an error describing why but not mentioning path.
// Because these checks apply to both module paths and import paths,
// the caller is expected to add the "malformed ___ path %q: " prefix.
// kind selects which per-element character rules apply (module,
// import, or file path); see checkElem.
func checkPath(path string, kind pathKind) error {
	if !utf8.ValidString(path) {
		return fmt.Errorf("invalid UTF-8")
	}
	if path == "" {
		return fmt.Errorf("empty string")
	}
	if path[0] == '-' {
		return fmt.Errorf("leading dash")
	}
	if strings.Contains(path, "//") {
		return fmt.Errorf("double slash")
	}
	if path[len(path)-1] == '/' {
		return fmt.Errorf("trailing slash")
	}
	// Validate each slash-separated element in turn.
	elemStart := 0
	for i, r := range path {
		if r == '/' {
			if err := checkElem(path[elemStart:i], kind); err != nil {
				return err
			}
			elemStart = i + 1
		}
	}
	// Final element (after the last slash, or the whole path).
	if err := checkElem(path[elemStart:], kind); err != nil {
		return err
	}
	return nil
}
// checkElem checks whether an individual path element is valid,
// applying the character rules selected by kind and the
// Windows-reserved-name restrictions. The returned error does not
// mention the full path; callers add that context.
func checkElem(elem string, kind pathKind) error {
	if elem == "" {
		return fmt.Errorf("empty path element")
	}
	// Reject elements made up entirely of dots ("." and "..").
	if strings.Count(elem, ".") == len(elem) {
		return fmt.Errorf("invalid path element %q", elem)
	}
	if elem[0] == '.' && kind == modulePath {
		return fmt.Errorf("leading dot in path element")
	}
	if elem[len(elem)-1] == '.' {
		return fmt.Errorf("trailing dot in path element")
	}
	// Per-rune validation according to the path kind.
	for _, r := range elem {
		ok := false
		switch kind {
		case modulePath:
			ok = modPathOK(r)
		case importPath:
			ok = importPathOK(r)
		case filePath:
			ok = fileNameOK(r)
		default:
			panic(fmt.Sprintf("internal error: invalid kind %v", kind))
		}
		if !ok {
			return fmt.Errorf("invalid char %q", r)
		}
	}
	// Windows disallows a bunch of path elements, sadly.
	// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
	// Only the prefix before the first dot matters (e.g. "con.txt").
	short := elem
	if i := strings.Index(short, "."); i >= 0 {
		short = short[:i]
	}
	for _, bad := range badWindowsNames {
		if strings.EqualFold(bad, short) {
			return fmt.Errorf("%q disallowed as path element component on Windows", short)
		}
	}
	if kind == filePath {
		// don't check for Windows short-names in file names. They're
		// only an issue for import paths.
		return nil
	}
	// Reject path components that look like Windows short-names.
	// Those usually end in a tilde followed by one or more ASCII digits.
	if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 {
		suffix := short[tilde+1:]
		suffixIsDigits := true
		for _, r := range suffix {
			if r < '0' || r > '9' {
				suffixIsDigits = false
				break
			}
		}
		if suffixIsDigits {
			return fmt.Errorf("trailing tilde and digits in path element")
		}
	}
	return nil
}
// CheckFilePath checks that a slash-separated file path is valid.
// The definition of a valid file path is the same as the definition
// of a valid import path except that the set of allowed characters is larger:
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
// and the ASCII punctuation characters
// “!#$%&()+,-.=@[]^_{}~”.
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
// have special meanings in certain shells or operating systems.)
//
// CheckFilePath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckFilePath(path string) error {
	// Delegate to the shared validator with file-path rules.
	if err := checkPath(path, filePath); err != nil {
		return fmt.Errorf("malformed file path %q: %v", path, err)
	}
	return nil
}
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
// Matching in checkElem is case-insensitive (strings.EqualFold) and
// applies to the element prefix before the first dot.
var badWindowsNames = []string{
	"CON",
	"PRN",
	"AUX",
	"NUL",
	"COM1",
	"COM2",
	"COM3",
	"COM4",
	"COM5",
	"COM6",
	"COM7",
	"COM8",
	"COM9",
	"LPT1",
	"LPT2",
	"LPT3",
	"LPT4",
	"LPT5",
	"LPT6",
	"LPT7",
	"LPT8",
	"LPT9",
}
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
	if strings.HasPrefix(path, "gopkg.in/") {
		return splitGopkgIn(path)
	}
	// Scan backward over trailing digits and dots to find a candidate
	// numeric suffix; remember whether any dot was seen.
	i := len(path)
	dot := false
	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
		if path[i-1] == '.' {
			dot = true
		}
		i--
	}
	// No "/v<digits>" suffix at all: the whole path is the prefix.
	if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
		return path, "", true
	}
	prefix, pathMajor = path[:i-2], path[i-2:]
	// Reject dotted versions ("/v1.2"), missing numbers, leading
	// zeros, and the disallowed "/v1".
	if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
		return path, "", false
	}
	return prefix, pathMajor, true
}
// splitGopkgIn is like SplitPathVersion but for gopkg.in paths, which
// spell the major version as a ".vN" suffix (any N >= 0, no leading
// zeros) and may carry a trailing "-unstable" marker.
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
	if !strings.HasPrefix(path, "gopkg.in/") {
		return path, "", false
	}
	end := len(path)
	if strings.HasSuffix(path, "-unstable") {
		end -= len("-unstable")
	}
	// Scan backward over the numeric part of the version suffix.
	for end > 0 && '0' <= path[end-1] && path[end-1] <= '9' {
		end--
	}
	// All gopkg.in paths must end in ".vN".
	if end <= 1 || path[end-1] != 'v' || path[end-2] != '.' {
		return path, "", false
	}
	prefix, pathMajor = path[:end-2], path[end-2:]
	// Reject a missing number and leading zeros (".v0" itself is fine).
	if len(pathMajor) <= 2 || (pathMajor[2] == '0' && pathMajor != ".v0") {
		return path, "", false
	}
	return prefix, pathMajor, true
}
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
	return CheckPathMajor(v, pathMajor) == nil
}
// CheckPathMajor returns a non-nil error if the semantic version v
// does not match the path major version pathMajor.
// On mismatch the error is an *InvalidVersionError.
func CheckPathMajor(v, pathMajor string) error {
	// TODO(jayconrod): return errors or panic for invalid inputs. This function
	// (and others) was covered by integration tests for cmd/go, and surrounding
	// code protected against invalid inputs like non-canonical versions.
	// gopkg.in suffixes may carry a "-unstable" marker; ignore it here.
	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
	}
	if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
		// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
		// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
		return nil
	}
	m := semver.Major(v)
	if pathMajor == "" {
		// An unsuffixed path allows v0, v1, or +incompatible builds.
		if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
			return nil
		}
		pathMajor = "v0 or v1"
	} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
		if m == pathMajor[1:] {
			return nil
		}
		pathMajor = pathMajor[1:]
	}
	return &InvalidVersionError{
		Version: v,
		Err:     fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
	}
}
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
// An empty PathMajorPrefix allows either v0 or v1.
//
// Note that MatchPathMajor may accept some versions that do not actually begin
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
// pathMajor, even though that pathMajor implies 'v1' tagging.
//
// It panics (a programmer error) if pathMajor is non-empty but lacks
// its '/' or '.' separator, or if the remainder is not a valid major
// version.
func PathMajorPrefix(pathMajor string) string {
	if pathMajor == "" {
		return ""
	}
	if pathMajor[0] != '/' && pathMajor[0] != '.' {
		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
	}
	// gopkg.in suffixes may carry a "-unstable" marker; strip it.
	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
	}
	m := pathMajor[1:]
	if m != semver.Major(m) {
		// BUG FIX: this message was missing the space before "passed",
		// rendering e.g. "pathMajor suffix .v2passed to ...".
		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix is not a valid major version")
	}
	return m
}
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
// (semver.Canonical strips build metadata, which would otherwise lose it.)
func CanonicalVersion(v string) string {
	cv := semver.Canonical(v)
	if semver.Build(v) == "+incompatible" {
		cv += "+incompatible"
	}
	return cv
}
// Sort sorts the list in place by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) {
	sort.Slice(list, func(i, j int) bool {
		mi := list[i]
		mj := list[j]
		if mi.Path != mj.Path {
			return mi.Path < mj.Path
		}
		// To help go.sum formatting, allow version/file.
		// Compare semver prefix by semver rules,
		// file by string order.
		vi := mi.Version
		vj := mj.Version
		var fi, fj string
		// Split "version/file" into its two parts (fi/fj keep the slash).
		if k := strings.Index(vi, "/"); k >= 0 {
			vi, fi = vi[:k], vi[k:]
		}
		if k := strings.Index(vj, "/"); k >= 0 {
			vj, fj = vj[:k], vj[k:]
		}
		if vi != vj {
			return semver.Compare(vi, vj) < 0
		}
		return fi < fj
	})
}
// EscapePath returns the escaped form of the given module path
// (uppercase letters become "!"+lowercase; see the package docs).
// It fails if the module path is invalid.
func EscapePath(path string) (escaped string, err error) {
	if err := CheckPath(path); err != nil {
		return "", err
	}
	return escapeString(path)
}
// EscapeVersion returns the escaped form of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks (which are reserved for the escaping
// itself). Failures are reported as *InvalidVersionError.
func EscapeVersion(v string) (escaped string, err error) {
	if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") {
		return "", &InvalidVersionError{
			Version: v,
			Err:     fmt.Errorf("disallowed version string"),
		}
	}
	return escapeString(v)
}
// escapeString converts s to its escaped form, replacing each ASCII
// uppercase letter with '!' followed by its lowercase equivalent.
// It fails on '!' or any non-ASCII rune, which would make the
// escaping ambiguous; callers are expected to have validated s.
func escapeString(s string) (escaped string, err error) {
	needsEscaping := false
	for _, r := range s {
		if r == '!' || r >= utf8.RuneSelf {
			// CheckPath should have rejected these already; the
			// escaping loop below relies on their absence.
			return "", fmt.Errorf("internal error: inconsistency in EscapePath")
		}
		if 'A' <= r && r <= 'Z' {
			needsEscaping = true
		}
	}
	// Fast path: nothing to rewrite.
	if !needsEscaping {
		return s, nil
	}
	out := make([]byte, 0, len(s)+2)
	for _, r := range s {
		if 'A' <= r && r <= 'Z' {
			out = append(out, '!', byte(r-'A'+'a'))
		} else {
			out = append(out, byte(r))
		}
	}
	return string(out), nil
}
// UnescapePath returns the module path for the given escaped path
// (reversing the "!"+lowercase encoding of uppercase letters).
// It fails if the escaped path is invalid or describes an invalid path.
func UnescapePath(escaped string) (path string, err error) {
	path, ok := unescapeString(escaped)
	if !ok {
		return "", fmt.Errorf("invalid escaped module path %q", escaped)
	}
	// The decoded result must itself be a valid module path.
	if err := CheckPath(path); err != nil {
		return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
	}
	return path, nil
}
// UnescapeVersion returns the version string for the given escaped version.
// It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func UnescapeVersion(escaped string) (v string, err error) {
	v, ok := unescapeString(escaped)
	if !ok {
		return "", fmt.Errorf("invalid escaped version %q", escaped)
	}
	// The decoded result must be a valid file-name element.
	if err := checkElem(v, filePath); err != nil {
		return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
	}
	return v, nil
}
// unescapeString reverses escapeString: each "!x" pair (x a lowercase
// ASCII letter) becomes the corresponding uppercase letter. It reports
// failure for non-ASCII input, a '!' not followed by a lowercase
// letter, a trailing '!', or a bare uppercase letter (which escaping
// could never have produced).
func unescapeString(escaped string) (string, bool) {
	out := make([]byte, 0, len(escaped))
	pendingBang := false
	for _, r := range escaped {
		switch {
		case r >= utf8.RuneSelf:
			return "", false
		case pendingBang:
			if r < 'a' || r > 'z' {
				return "", false
			}
			out = append(out, byte(r-'a'+'A'))
			pendingBang = false
		case r == '!':
			pendingBang = true
		case 'A' <= r && r <= 'Z':
			return "", false
		default:
			out = append(out, byte(r))
		}
	}
	if pendingBang {
		return "", false
	}
	return string(out), true
}
// MatchPrefixPatterns reports whether any path prefix of target matches
// one of the glob patterns (as defined by path.Match) in the
// comma-separated globs list. This implements the algorithm used when
// matching a module path to the GOPRIVATE environment variable, as
// described by 'go help module-private'.
//
// Empty or malformed patterns in the list are ignored.
func MatchPrefixPatterns(globs, target string) bool {
	for _, glob := range strings.Split(globs, ",") {
		if glob == "" {
			continue
		}
		// A glob with N+1 path elements (N slashes) must be matched
		// against the first N+1 path elements of target, which end
		// just before the N+1'th slash.
		remaining := strings.Count(glob, "/")
		prefix := target
		for i := 0; i < len(target); i++ {
			if target[i] != '/' {
				continue
			}
			if remaining == 0 {
				prefix = target[:i]
				break
			}
			remaining--
		}
		if remaining > 0 {
			// target has fewer path elements than glob.
			continue
		}
		if ok, _ := path.Match(glob, prefix); ok {
			return true
		}
	}
	return false
}
module: add InvalidPathError
CheckPath, CheckImportPath, and CheckFilePath now return this error.
To be used in CL 297634.
Change-Id: Ibc4af7c5593f35216ab3ade0b024971061b8cf97
Reviewed-on: https://go-review.googlesource.com/c/mod/+/297891
Trust: Jay Conrod <f3fadaedc813344da5ec13b41e26596889cc5baa@google.com>
Run-TryBot: Jay Conrod <f3fadaedc813344da5ec13b41e26596889cc5baa@google.com>
TryBot-Result: Go Bot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Bryan C. Mills <1c8aad60184261ccede67f5c63a0d2a3bf3c9ff4@google.com>
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type along with support code.
//
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
// Version string
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the escaped form be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe escaped form that
// leaves most paths unaltered.
//
// The safe escaped form is to replace every uppercase letter
// with an exclamation mark followed by the letter's lowercase equivalent.
//
// For example,
//
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
// Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
// Although paths are currently disallowed from using Unicode,
// we would like at some point to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention for escaping them in the file system.
// But there are at least two subtle considerations.
//
// First, note that not all case-fold equivalent distinct runes
// form an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are three distinct runes that case-fold to each other.
// When we do add Unicode letters, we must not assume that upper/lower
// are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
//
// Second, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
package module
// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.
import (
"fmt"
"path"
"sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/mod/semver"
errors "golang.org/x/xerrors"
)
// A Version (for clients, a module.Version) is defined by a module path and version pair.
// These are stored in their plain (unescaped) form.
// The zero value is an empty path with no version.
type Version struct {
	// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
	Path string
	// Version is usually a semantic version in canonical form.
	// There are three exceptions to this general rule.
	// First, the top-level target of a build has no specific version
	// and uses Version = "".
	// Second, during MVS calculations the version "none" is used
	// to represent the decision to take no version of a given module.
	// Third, filesystem paths found in "replace" directives are
	// represented by a path with an empty version.
	Version string `json:",omitempty"`
}
// String formats the Version for logging as "Path@Version",
// or just the path when no version is set.
func (m Version) String() string {
	if m.Version != "" {
		return m.Path + "@" + m.Version
	}
	return m.Path
}
// A ModuleError indicates an error specific to a module.
type ModuleError struct {
	Path    string // module path
	Version string // module version, or "" if no version is relevant
	Err     error  // the underlying error
}
// VersionError returns a ModuleError derived from a Version and error,
// or err itself if it is already such an error.
func VersionError(v Version, err error) error {
	// Avoid double-wrapping when err is already a ModuleError for
	// exactly this path/version pair.
	var mErr *ModuleError
	if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
		return err
	}
	return &ModuleError{
		Path:    v.Path,
		Version: v.Version,
		Err:     err,
	}
}
// Error implements the error interface, formatting as "path@version: err"
// (or "module path: err" when no version is set). A wrapped
// InvalidVersionError supplies its own version and noun for the message.
func (e *ModuleError) Error() string {
	if v, ok := e.Err.(*InvalidVersionError); ok {
		return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
	}
	if e.Version != "" {
		return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
	}
	return fmt.Sprintf("module %s: %v", e.Path, e.Err)
}
func (e *ModuleError) Unwrap() error { return e.Err }
// An InvalidVersionError indicates an error specific to a version, with the
// module path unknown or specified externally.
//
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
// must not wrap a ModuleError.
type InvalidVersionError struct {
	Version string // the rejected version string
	Pseudo  bool   // whether Version is a pseudo-version
	Err     error  // the underlying error
}
// noun returns the word to use for e.Version in error messages:
// "pseudo-version" when e.Pseudo is set, plain "version" otherwise.
func (e *InvalidVersionError) noun() string {
	if !e.Pseudo {
		return "version"
	}
	return "pseudo-version"
}
// Error implements the error interface, e.g. `version "x" invalid: ...`.
func (e *InvalidVersionError) Error() string {
	return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
}
func (e *InvalidVersionError) Unwrap() error { return e.Err }
// An InvalidPathError indicates a module, import, or file path doesn't
// satisfy all naming constraints. See CheckPath, CheckImportPath,
// and CheckFilePath for specific restrictions.
type InvalidPathError struct {
	Kind string // "module", "import", or "file"
	Path string // the rejected path
	Err  error  // the underlying error
}
// Error implements the error interface, e.g. `malformed module path "x": ...`.
func (e *InvalidPathError) Error() string {
	return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err)
}
func (e *InvalidPathError) Unwrap() error { return e.Err }
// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
func Check(path, version string) error {
	if err := CheckPath(path); err != nil {
		return err
	}
	if !semver.IsValid(version) {
		return &ModuleError{
			Path: path,
			Err:  &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
		}
	}
	// The path's major-version suffix (e.g. "/v2"), if any, must agree
	// with the major version of the semantic version.
	_, pathMajor, _ := SplitPathVersion(path)
	if err := CheckPathMajor(version, pathMajor); err != nil {
		return &ModuleError{Path: path, Err: err}
	}
	return nil
}
// firstPathOK reports whether r can appear in the first element of a module path.
// The first element of the path must be an LDH domain name, at least for now.
// To avoid case ambiguity, the domain name must be entirely lower case.
func firstPathOK(r rune) bool {
	switch {
	case 'a' <= r && r <= 'z':
		return true
	case '0' <= r && r <= '9':
		return true
	case r == '-', r == '.':
		return true
	}
	return false
}
// modPathOK reports whether r can appear in a module path element.
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
//
// This matches what "go get" has historically recognized in import paths,
// and avoids confusing sequences like '%20' or '+' that would change meaning
// if used in a URL.
//
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see "escaped paths" above).
func modPathOK(r rune) bool {
	if r >= utf8.RuneSelf {
		return false
	}
	switch {
	case '0' <= r && r <= '9', 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z':
		return true
	case r == '-', r == '.', r == '_', r == '~':
		return true
	}
	return false
}
// importPathOK reports whether r can appear in a package import path element.
//
// Import paths are intermediate between module paths and file paths: we
// disallow characters that would be confusing or ambiguous as arguments to
// 'go get' (such as '@' and ' ' ), but allow certain characters that are
// otherwise-unambiguous on the command line and historically used for some
// binary names (such as '++' as a suffix for compiler binaries and wrappers).
func importPathOK(r rune) bool {
	return modPathOK(r) || r == '+'
}
// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "escaped paths" above.
func fileNameOK(r rune) bool {
	if r >= utf8.RuneSelf {
		// Non-ASCII: letters only.
		// It may be OK to add more ASCII punctuation here, but only carefully.
		// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
		return unicode.IsLetter(r)
	}
	if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
		return true
	}
	// Entire set of ASCII punctuation, from which we remove characters:
	// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
	// We disallow some shell special characters: " ' * < > ? ` |
	// (Note that some of those are disallowed by the Windows file system as well.)
	// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
	// We allow spaces (U+0020) in file names.
	const allowed = "!#$%&()+,-.=@[]^_{}~ "
	return strings.ContainsRune(allowed, r)
}
// CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by CheckImportPath,
// with three additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
// ASCII digits, dots (U+002E), and dashes (U+002D);
// it must contain at least one dot and cannot start with a dash.
// Second, for a final path element of the form /vN, where N looks numeric
// (ASCII digits and dots) must not begin with a leading zero, must not be /v1,
// and must not contain any dots. For paths beginning with "gopkg.in/",
// this second requirement is replaced by a requirement that the path
// follow the gopkg.in server's conventions.
// Third, no path element may begin with a dot.
func CheckPath(path string) (err error) {
	// Wrap any failure so callers see which path was rejected and why.
	defer func() {
		if err != nil {
			err = &InvalidPathError{Kind: "module", Path: path, Err: err}
		}
	}()
	if err := checkPath(path, modulePath); err != nil {
		return err
	}
	// Examine the first path element (up to the first slash, or the whole
	// path if there is no slash).
	i := strings.Index(path, "/")
	if i < 0 {
		i = len(path)
	}
	if i == 0 {
		return fmt.Errorf("leading slash")
	}
	if !strings.Contains(path[:i], ".") {
		return fmt.Errorf("missing dot in first path element")
	}
	if path[0] == '-' {
		return fmt.Errorf("leading dash in first path element")
	}
	for _, r := range path[:i] {
		if !firstPathOK(r) {
			return fmt.Errorf("invalid char %q in first path element", r)
		}
	}
	// Validate any trailing major-version suffix ("/vN", or ".vN" for gopkg.in).
	if _, _, ok := SplitPathVersion(path); !ok {
		return fmt.Errorf("invalid version")
	}
	return nil
}
// CheckImportPath checks that an import path is valid.
//
// A valid import path consists of one or more valid path elements
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
//
// A valid path element is a non-empty string made up of
// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
// It must not end with a dot (U+002E), nor contain two dots in a row.
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on). The element
// must not have a suffix of a tilde followed by one or more ASCII digits
// (to exclude paths elements that look like Windows short-names).
//
// CheckImportPath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckImportPath(path string) error {
	// checkPath enforces the per-element rules; add path-kind context on failure.
	if err := checkPath(path, importPath); err != nil {
		return &InvalidPathError{Kind: "import", Path: path, Err: err}
	}
	return nil
}
// pathKind indicates what kind of path we're checking. Module paths,
// import paths, and file paths have different restrictions.
type pathKind int
const (
	modulePath pathKind = iota // module paths (strictest character set; see modPathOK)
	importPath                 // package import paths (module set plus '+'; see importPathOK)
	filePath                   // file names (adds Unicode letters and more punctuation; see fileNameOK)
)
// checkPath checks that a general path is valid. kind indicates what
// specific constraints should be applied.
//
// checkPath returns an error describing why the path is not valid.
// Because these checks apply to module, import, and file paths,
// and because other checks may be applied, the caller is expected to wrap
// this error with InvalidPathError.
func checkPath(path string, kind pathKind) error {
	if !utf8.ValidString(path) {
		return fmt.Errorf("invalid UTF-8")
	}
	if path == "" {
		return fmt.Errorf("empty string")
	}
	if path[0] == '-' {
		return fmt.Errorf("leading dash")
	}
	if strings.Contains(path, "//") {
		return fmt.Errorf("double slash")
	}
	if path[len(path)-1] == '/' {
		return fmt.Errorf("trailing slash")
	}
	// Validate each slash-separated element individually.
	elemStart := 0
	for i, r := range path {
		if r == '/' {
			if err := checkElem(path[elemStart:i], kind); err != nil {
				return err
			}
			elemStart = i + 1
		}
	}
	// The final element is not followed by a slash; check it as well.
	if err := checkElem(path[elemStart:], kind); err != nil {
		return err
	}
	return nil
}
// checkElem checks whether an individual path element is valid.
// The rules vary with kind; see pathKind and the xxxPathOK helpers.
func checkElem(elem string, kind pathKind) error {
	if elem == "" {
		return fmt.Errorf("empty path element")
	}
	// Reject elements made up entirely of dots ("." and "..").
	if strings.Count(elem, ".") == len(elem) {
		return fmt.Errorf("invalid path element %q", elem)
	}
	if elem[0] == '.' && kind == modulePath {
		return fmt.Errorf("leading dot in path element")
	}
	if elem[len(elem)-1] == '.' {
		return fmt.Errorf("trailing dot in path element")
	}
	// Every rune must be in the character set allowed for this path kind.
	for _, r := range elem {
		ok := false
		switch kind {
		case modulePath:
			ok = modPathOK(r)
		case importPath:
			ok = importPathOK(r)
		case filePath:
			ok = fileNameOK(r)
		default:
			panic(fmt.Sprintf("internal error: invalid kind %v", kind))
		}
		if !ok {
			return fmt.Errorf("invalid char %q", r)
		}
	}
	// Windows disallows a bunch of path elements, sadly.
	// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
	// Only the prefix up to the first dot matters (e.g. "CON.txt" is reserved).
	short := elem
	if i := strings.Index(short, "."); i >= 0 {
		short = short[:i]
	}
	for _, bad := range badWindowsNames {
		if strings.EqualFold(bad, short) {
			return fmt.Errorf("%q disallowed as path element component on Windows", short)
		}
	}
	if kind == filePath {
		// don't check for Windows short-names in file names. They're
		// only an issue for import paths.
		return nil
	}
	// Reject path components that look like Windows short-names.
	// Those usually end in a tilde followed by one or more ASCII digits.
	if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 {
		suffix := short[tilde+1:]
		suffixIsDigits := true
		for _, r := range suffix {
			if r < '0' || r > '9' {
				suffixIsDigits = false
				break
			}
		}
		if suffixIsDigits {
			return fmt.Errorf("trailing tilde and digits in path element")
		}
	}
	return nil
}
// CheckFilePath checks that a slash-separated file path is valid.
// A valid file path follows the same rules as a valid import path
// (see CheckImportPath) but with a larger allowed character set:
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
// and the ASCII punctuation characters
// “!#$%&()+,-.=@[]^_{}~”.
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
// have special meanings in certain shells or operating systems.)
//
// CheckFilePath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckFilePath(path string) error {
	err := checkPath(path, filePath)
	if err == nil {
		return nil
	}
	return &InvalidPathError{Kind: "file", Path: path, Err: err}
}
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
	// Console, printer, auxiliary, and null device names.
	"CON",
	"PRN",
	"AUX",
	"NUL",
	// Serial ports COM1 through COM9.
	"COM1",
	"COM2",
	"COM3",
	"COM4",
	"COM5",
	"COM6",
	"COM7",
	"COM8",
	"COM9",
	// Parallel ports LPT1 through LPT9.
	"LPT1",
	"LPT2",
	"LPT3",
	"LPT4",
	"LPT5",
	"LPT6",
	"LPT7",
	"LPT8",
	"LPT9",
}
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
	if strings.HasPrefix(path, "gopkg.in/") {
		return splitGopkgIn(path)
	}
	// Scan backward over the trailing run of ASCII digits and dots,
	// remembering whether a dot was seen (dots invalidate a /vN suffix).
	i := len(path)
	dot := false
	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
		if path[i-1] == '.' {
			dot = true
		}
		i--
	}
	// No "/vN" suffix at all: the whole path is the prefix, which is fine.
	if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
		return path, "", true
	}
	prefix, pathMajor = path[:i-2], path[i-2:]
	// Reject dots in N, an empty or zero-leading N, and the disallowed "/v1".
	if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
		return path, "", false
	}
	return prefix, pathMajor, true
}
// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths,
// which use ".vN" suffixes (for any N >= 0) instead of "/vN".
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
	if !strings.HasPrefix(path, "gopkg.in/") {
		return path, "", false
	}
	i := len(path)
	// gopkg.in allows an optional "-unstable" suffix after the version.
	if strings.HasSuffix(path, "-unstable") {
		i -= len("-unstable")
	}
	// Scan backward over the trailing run of ASCII digits (the N in ".vN").
	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
		i--
	}
	if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
		// All gopkg.in paths must end in vN for some N.
		return path, "", false
	}
	prefix, pathMajor = path[:i-2], path[i-2:]
	// Reject an empty N and leading zeros in N, except for ".v0" itself.
	if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
		return path, "", false
	}
	return prefix, pathMajor, true
}
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// It is the boolean form of CheckPathMajor: it returns true
// if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
	err := CheckPathMajor(v, pathMajor)
	return err == nil
}
// CheckPathMajor returns a non-nil error if the semantic version v
// does not match the path major version pathMajor.
func CheckPathMajor(v, pathMajor string) error {
	// TODO(jayconrod): return errors or panic for invalid inputs. This function
	// (and others) was covered by integration tests for cmd/go, and surrounding
	// code protected against invalid inputs like non-canonical versions.
	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
	}
	if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
		// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
		// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
		return nil
	}
	m := semver.Major(v)
	if pathMajor == "" {
		// An unsuffixed path accepts v0, v1, or a "+incompatible" build.
		if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
			return nil
		}
		pathMajor = "v0 or v1"
	} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
		// A "/vN" or ".vN" suffix accepts exactly major version vN.
		if m == pathMajor[1:] {
			return nil
		}
		pathMajor = pathMajor[1:]
	}
	return &InvalidVersionError{
		Version: v,
		Err:     fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
	}
}
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
// An empty PathMajorPrefix allows either v0 or v1.
//
// Note that MatchPathMajor may accept some versions that do not actually begin
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
// pathMajor, even though that pathMajor implies 'v1' tagging.
func PathMajorPrefix(pathMajor string) string {
	if pathMajor == "" {
		return ""
	}
	if pathMajor[0] != '/' && pathMajor[0] != '.' {
		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
	}
	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
	}
	m := pathMajor[1:]
	if m != semver.Major(m) {
		// Fix: the original message concatenated pathMajor and "passed"
		// without a separating space.
		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix is not a valid major version")
	}
	return m
}
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
	cv := semver.Canonical(v)
	// semver.Canonical drops build metadata; re-attach "+incompatible".
	if semver.Build(v) == "+incompatible" {
		cv += "+incompatible"
	}
	return cv
}
// Sort sorts the list by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) {
	sort.Slice(list, func(i, j int) bool {
		mi := list[i]
		mj := list[j]
		if mi.Path != mj.Path {
			return mi.Path < mj.Path
		}
		// To help go.sum formatting, allow version/file.
		// Compare semver prefix by semver rules,
		// file by string order.
		vi := mi.Version
		vj := mj.Version
		// fi and fj hold the "/file" suffixes (including the slash), if any.
		var fi, fj string
		if k := strings.Index(vi, "/"); k >= 0 {
			vi, fi = vi[:k], vi[k:]
		}
		if k := strings.Index(vj, "/"); k >= 0 {
			vj, fj = vj[:k], vj[k:]
		}
		if vi != vj {
			return semver.Compare(vi, vj) < 0
		}
		return fi < fj
	})
}
// EscapePath returns the escaped form of the given module path.
// It fails if the module path is invalid.
func EscapePath(path string) (escaped string, err error) {
	if err := CheckPath(path); err != nil {
		return "", err
	}
	// Apply the !-for-uppercase escaping described in the package docs.
	return escapeString(path)
}
// EscapeVersion returns the escaped form of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EscapeVersion(v string) (escaped string, err error) {
	// '!' is reserved as the escape character, so a literal '!' is disallowed.
	if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") {
		return "", &InvalidVersionError{
			Version: v,
			Err:     fmt.Errorf("disallowed version string"),
		}
	}
	return escapeString(v)
}
// escapeString applies the !-for-uppercase escaping to s: every ASCII
// uppercase letter becomes '!' followed by its lowercase equivalent.
// It reports an internal error if s contains '!' or any non-ASCII rune,
// since the escaping below is only correct without them.
func escapeString(s string) (escaped string, err error) {
	needEscape := false
	for _, r := range s {
		if r == '!' || r >= utf8.RuneSelf {
			// This should be disallowed by CheckPath, but diagnose anyway.
			// The correctness of the escaping loop below depends on it.
			return "", fmt.Errorf("internal error: inconsistency in EscapePath")
		}
		if 'A' <= r && r <= 'Z' {
			needEscape = true
		}
	}
	// Fast path: nothing to escape.
	if !needEscape {
		return s, nil
	}
	var b strings.Builder
	b.Grow(len(s) + 1)
	for _, r := range s {
		if 'A' <= r && r <= 'Z' {
			b.WriteByte('!')
			b.WriteByte(byte(r) + 'a' - 'A')
		} else {
			b.WriteByte(byte(r))
		}
	}
	return b.String(), nil
}
// UnescapePath returns the module path for the given escaped path.
// It fails if the escaped path is invalid or describes an invalid path.
func UnescapePath(escaped string) (path string, err error) {
	path, ok := unescapeString(escaped)
	if !ok {
		return "", fmt.Errorf("invalid escaped module path %q", escaped)
	}
	// The unescaped result must itself be a valid module path.
	if err := CheckPath(path); err != nil {
		return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
	}
	return path, nil
}
// UnescapeVersion returns the version string for the given escaped version.
// It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func UnescapeVersion(escaped string) (v string, err error) {
	v, ok := unescapeString(escaped)
	if !ok {
		return "", fmt.Errorf("invalid escaped version %q", escaped)
	}
	// The unescaped result must still be a valid file-name element.
	if err := checkElem(v, filePath); err != nil {
		return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
	}
	return v, nil
}
// unescapeString reverses the !-for-uppercase escaping: "!x" becomes 'X'.
// It reports ok == false for any input that is not a well-formed escaped
// string: non-ASCII runes, a '!' not followed by a lowercase letter,
// a trailing '!', or a literal uppercase letter.
func unescapeString(escaped string) (string, bool) {
	buf := make([]byte, 0, len(escaped))
	bang := false
	for _, r := range escaped {
		switch {
		case r >= utf8.RuneSelf:
			return "", false
		case bang:
			// The previous rune was '!'; only a-z may follow.
			if r < 'a' || r > 'z' {
				return "", false
			}
			buf = append(buf, byte(r)-'a'+'A')
			bang = false
		case r == '!':
			bang = true
		case 'A' <= r && r <= 'Z':
			// Literal uppercase never appears in a valid escaped string.
			return "", false
		default:
			buf = append(buf, byte(r))
		}
	}
	if bang {
		return "", false
	}
	return string(buf), true
}
// MatchPrefixPatterns reports whether any path prefix of target matches one of
// the glob patterns (as defined by path.Match) in the comma-separated globs
// list. This implements the algorithm used when matching a module path to the
// GOPRIVATE environment variable, as described by 'go help module-private'.
//
// It ignores any empty or malformed patterns in the list.
func MatchPrefixPatterns(globs, target string) bool {
	for _, glob := range strings.Split(globs, ",") {
		if glob == "" {
			continue
		}
		// A glob with N+1 path elements (N slashes) is matched against the
		// first N+1 path elements of target, ending just before the N+1'th slash.
		slashes := strings.Count(glob, "/")
		prefix := target
		for i, c := range []byte(target) {
			if c != '/' {
				continue
			}
			if slashes == 0 {
				prefix = target[:i]
				break
			}
			slashes--
		}
		if slashes > 0 {
			// target has fewer path elements than glob; it cannot match.
			continue
		}
		if matched, _ := path.Match(glob, prefix); matched {
			return true
		}
	}
	return false
}
|
package pki_test
import (
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"io"
"net"
"net/http"
"net/http/httptest"
"testing"
"github.com/cheekybits/is"
"github.com/Avalanche-io/c4/pki"
)
// TestUserSaveLoad verifies that a User round-trips through JSON without
// storing the private key or passphrase in the clear, and that only the
// correct passphrase unlocks a loaded copy.
func TestUserSaveLoad(t *testing.T) {
	is := is.New(t)
	u1, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(u1)
	is.NoErr(u1.GenerateKeys())
	is.NoErr(u1.Passphrase("some passphrase"))
	// Save
	data, err := json.Marshal(u1)
	is.NoErr(err)
	// Load
	var u2 pki.User
	is.NoErr(json.Unmarshal(data, &u2))
	// Does not save the PrivateKey, or Passphrase in the clear
	is.NotNil(u2.EncryptedPrivateKey)
	is.Nil(u2.ClearPrivateKey)
	is.NotNil(u2.EncryptedPassphrase)
	is.Nil(u2.ClearPassphrase)
	// The correct passphrase unlocks the loaded user's private key.
	is.NoErr(u2.Passphrase("some passphrase"))
	is.NotNil(u2.Private())
	// A wrong passphrase must fail and leave the key inaccessible.
	var u3 pki.User
	is.NoErr(json.Unmarshal(data, &u3))
	err = u3.Passphrase("wrong passphrase")
	is.Err(err)
	is.Equal(err.Error(), "incorrect passphrase")
	is.Nil(u3.Private())
}
// TestUserCSR exercises the full CSR flow: a CA approves certificates for a
// domain (server) and a user (client), and the resulting certs are used for
// a mutually-authenticated TLS round trip.
func TestUserCSR(t *testing.T) {
	is := is.New(t)
	// Create a Certificate Authority.
	ca, err := pki.CreateAthorty(pkix.Name{CommonName: "c4.studio.com"}, nil, nil)
	is.NoErr(err)
	is.NotNil(ca)
	// Create Domain Entity. Check the error before using the result
	// (the original dereferenced server before is.NoErr(err)).
	server, err := pki.NewDomain("test")
	is.NoErr(err)
	server.AddIPs(net.ParseIP("127.0.0.1"))
	err = server.GenerateKeys()
	is.NoErr(err)
	servercsr, err := server.CSR()
	is.NoErr(err)
	is.NotNil(servercsr)
	// CA endorses the domain's CSR.
	serverCert, err := ca.Approve(servercsr)
	is.NoErr(err)
	is.NotNil(serverCert)
	server.SetCert(serverCert)
	// Create a user.
	user, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(user)
	is.NoErr(user.GenerateKeys())
	// Check the Passphrase error (previously ignored).
	is.NoErr(user.Passphrase("some passphrase"))
	csr, err := user.CSR()
	is.NoErr(err)
	is.NotNil(csr)
	// CA endorses the users Certificate Signing Request.
	userCert, err := ca.Approve(csr)
	is.NoErr(err)
	is.NotNil(userCert)
	user.SetCert(userCert)
	// Test if user cert can be used for TLS connection.
	message := []byte("Hello, C4!")
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write(message)
	})
	// Create a pool of trusted certs which include the root CA.
	certPool := x509.NewCertPool()
	certPool.AppendCertsFromPEM(ca.Cert().PEM())
	servTLSCert, err := server.TLScert(pki.TLS_CLISRV)
	is.NoErr(err)
	// Require client authentication.
	webserver := httptest.NewUnstartedServer(hello)
	webserver.TLS = &tls.Config{
		Certificates: []tls.Certificate{servTLSCert},
		ClientAuth:   tls.RequireAndVerifyClientCert,
		ClientCAs:    certPool,
	}
	// Produce TLS credentials for client.
	clientTLSCert, err := user.TLScert(pki.TLS_CLIONLY)
	is.NoErr(err)
	// Create a client that trusts the CA and presents the user cert.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      certPool,
				Certificates: []tls.Certificate{clientTLSCert},
			},
		},
	}
	// Start the server.
	webserver.StartTLS()
	// Have client make Get request.
	resp, err := client.Get(webserver.URL)
	is.NoErr(err)
	// Close server.
	webserver.Close()
	// Read and check response. Read may return io.EOF together with the
	// final bytes, so EOF is the only acceptable error here. Close the
	// body so the connection is released (previously leaked).
	defer resp.Body.Close()
	reply := make([]byte, resp.ContentLength)
	_, err = resp.Body.Read(reply)
	if err != nil {
		is.Equal(err, io.EOF)
	}
	is.Equal(reply, message)
	// The user must remain serializable after use.
	data, err := json.Marshal(user)
	is.NoErr(err)
	var user2 pki.User
	err = json.Unmarshal(data, &user2)
	is.NoErr(err)
}
// TestUserChangePassphrase verifies that after changing the passphrase,
// the new passphrase unlocks a freshly loaded copy of the user.
func TestUserChangePassphrase(t *testing.T) {
	is := is.New(t)
	user, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(user)
	is.NoErr(user.GenerateKeys())
	is.NotNil(user.Private())
	oldpw := "some passphrase"
	// set original passphrase
	err = user.Passphrase(oldpw)
	is.NoErr(err)
	newpw := "new passphrase"
	err = user.ChangePassphrase(oldpw, newpw)
	is.NoErr(err)
	// Save
	data, err := json.Marshal(user)
	is.NoErr(err)
	// Load
	var user2 pki.User
	is.NoErr(json.Unmarshal(data, &user2))
	// The loaded copy is locked until the new passphrase is supplied.
	is.Nil(user2.Private())
	is.NoErr(user2.Passphrase(newpw))
	is.NotNil(user2.Private())
}
// TestUserLogout verifies that Logout clears the in-memory (clear) private
// key and passphrase while keeping their encrypted forms, and that supplying
// the passphrase again restores the clear forms.
func TestUserLogout(t *testing.T) {
	is := is.New(t)
	user, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(user)
	is.NoErr(user.GenerateKeys())
	is.NotNil(user.Private())
	oldpw := "some passphrase"
	err = user.Passphrase(oldpw)
	is.NoErr(err)
	is.NotNil(user.EncryptedPrivateKey)
	is.NotNil(user.ClearPrivateKey)
	is.NotNil(user.EncryptedPassphrase)
	is.NotNil(user.ClearPassphrase)
	user.Logout()
	// After logout only the encrypted forms remain.
	is.NotNil(user.EncryptedPrivateKey)
	is.Nil(user.ClearPrivateKey)
	is.NotNil(user.EncryptedPassphrase)
	is.Nil(user.ClearPassphrase)
	// Logging back in with the same passphrase restores the clear forms.
	err = user.Passphrase(oldpw)
	is.NoErr(err) // Fix: this error result was previously ignored.
	is.NotNil(user.EncryptedPrivateKey)
	is.NotNil(user.ClearPrivateKey)
	is.NotNil(user.EncryptedPassphrase)
	is.NotNil(user.ClearPassphrase)
}
Fixed an ignored error return value in TestUserLogout().
package pki_test
import (
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"io"
"net"
"net/http"
"net/http/httptest"
"testing"
"github.com/cheekybits/is"
"github.com/Avalanche-io/c4/pki"
)
// TestUserSaveLoad verifies that a User round-trips through JSON without
// storing the private key or passphrase in the clear, and that only the
// correct passphrase unlocks a loaded copy.
func TestUserSaveLoad(t *testing.T) {
	is := is.New(t)
	u1, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(u1)
	is.NoErr(u1.GenerateKeys())
	is.NoErr(u1.Passphrase("some passphrase"))
	// Save
	data, err := json.Marshal(u1)
	is.NoErr(err)
	// Load
	var u2 pki.User
	is.NoErr(json.Unmarshal(data, &u2))
	// Does not save the PrivateKey, or Passphrase in the clear
	is.NotNil(u2.EncryptedPrivateKey)
	is.Nil(u2.ClearPrivateKey)
	is.NotNil(u2.EncryptedPassphrase)
	is.Nil(u2.ClearPassphrase)
	// The correct passphrase unlocks the loaded user's private key.
	is.NoErr(u2.Passphrase("some passphrase"))
	is.NotNil(u2.Private())
	// A wrong passphrase must fail and leave the key inaccessible.
	var u3 pki.User
	is.NoErr(json.Unmarshal(data, &u3))
	err = u3.Passphrase("wrong passphrase")
	is.Err(err)
	is.Equal(err.Error(), "incorrect passphrase")
	is.Nil(u3.Private())
}
// TestUserCSR exercises the full CSR flow: a CA approves certificates for a
// domain (server) and a user (client), and the resulting certs are used for
// a mutually-authenticated TLS round trip.
func TestUserCSR(t *testing.T) {
	is := is.New(t)
	// Create a Certificate Authority.
	ca, err := pki.CreateAthorty(pkix.Name{CommonName: "c4.studio.com"}, nil, nil)
	is.NoErr(err)
	is.NotNil(ca)
	// Create Domain Entity. Check the error before using the result
	// (the original dereferenced server before is.NoErr(err)).
	server, err := pki.NewDomain("test")
	is.NoErr(err)
	server.AddIPs(net.ParseIP("127.0.0.1"))
	err = server.GenerateKeys()
	is.NoErr(err)
	servercsr, err := server.CSR()
	is.NoErr(err)
	is.NotNil(servercsr)
	// CA endorses the domain's CSR.
	serverCert, err := ca.Approve(servercsr)
	is.NoErr(err)
	is.NotNil(serverCert)
	server.SetCert(serverCert)
	// Create a user.
	user, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(user)
	is.NoErr(user.GenerateKeys())
	// Check the Passphrase error (previously ignored).
	is.NoErr(user.Passphrase("some passphrase"))
	csr, err := user.CSR()
	is.NoErr(err)
	is.NotNil(csr)
	// CA endorses the users Certificate Signing Request.
	userCert, err := ca.Approve(csr)
	is.NoErr(err)
	is.NotNil(userCert)
	user.SetCert(userCert)
	// Test if user cert can be used for TLS connection.
	message := []byte("Hello, C4!")
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write(message)
	})
	// Create a pool of trusted certs which include the root CA.
	certPool := x509.NewCertPool()
	certPool.AppendCertsFromPEM(ca.Cert().PEM())
	servTLSCert, err := server.TLScert(pki.TLS_CLISRV)
	is.NoErr(err)
	// Require client authentication.
	webserver := httptest.NewUnstartedServer(hello)
	webserver.TLS = &tls.Config{
		Certificates: []tls.Certificate{servTLSCert},
		ClientAuth:   tls.RequireAndVerifyClientCert,
		ClientCAs:    certPool,
	}
	// Produce TLS credentials for client.
	clientTLSCert, err := user.TLScert(pki.TLS_CLIONLY)
	is.NoErr(err)
	// Create a client that trusts the CA and presents the user cert.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      certPool,
				Certificates: []tls.Certificate{clientTLSCert},
			},
		},
	}
	// Start the server.
	webserver.StartTLS()
	// Have client make Get request.
	resp, err := client.Get(webserver.URL)
	is.NoErr(err)
	// Close server.
	webserver.Close()
	// Read and check response. Read may return io.EOF together with the
	// final bytes, so EOF is the only acceptable error here. Close the
	// body so the connection is released (previously leaked).
	defer resp.Body.Close()
	reply := make([]byte, resp.ContentLength)
	_, err = resp.Body.Read(reply)
	if err != nil {
		is.Equal(err, io.EOF)
	}
	is.Equal(reply, message)
	// The user must remain serializable after use.
	data, err := json.Marshal(user)
	is.NoErr(err)
	var user2 pki.User
	err = json.Unmarshal(data, &user2)
	is.NoErr(err)
}
// TestUserChangePassphrase verifies that after changing the passphrase,
// the new passphrase unlocks a freshly loaded copy of the user.
func TestUserChangePassphrase(t *testing.T) {
	is := is.New(t)
	user, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(user)
	is.NoErr(user.GenerateKeys())
	is.NotNil(user.Private())
	oldpw := "some passphrase"
	// set original passphrase
	err = user.Passphrase(oldpw)
	is.NoErr(err)
	newpw := "new passphrase"
	err = user.ChangePassphrase(oldpw, newpw)
	is.NoErr(err)
	// Save
	data, err := json.Marshal(user)
	is.NoErr(err)
	// Load
	var user2 pki.User
	is.NoErr(json.Unmarshal(data, &user2))
	// The loaded copy is locked until the new passphrase is supplied.
	is.Nil(user2.Private())
	is.NoErr(user2.Passphrase(newpw))
	is.NotNil(user2.Private())
}
// TestUserLogout verifies that Logout clears the in-memory (clear-text)
// key material while keeping the encrypted copies, and that a subsequent
// Passphrase call restores the clear-text fields.
func TestUserLogout(t *testing.T) {
	is := is.New(t)
	user, err := pki.NewUser("john.doe@example.com", pki.EMail)
	is.NoErr(err)
	is.NotNil(user)
	is.NoErr(user.GenerateKeys())
	is.NotNil(user.Private())
	oldpw := "some passphrase"
	err = user.Passphrase(oldpw)
	is.NoErr(err)
	// After setting a passphrase, both encrypted and clear copies exist.
	is.NotNil(user.EncryptedPrivateKey)
	is.NotNil(user.ClearPrivateKey)
	is.NotNil(user.EncryptedPassphrase)
	is.NotNil(user.ClearPassphrase)
	user.Logout()
	// NOTE(review): this re-checks the stale err from Passphrase above —
	// Logout has no error result, so this assertion is a no-op.
	is.NoErr(err)
	// Logout must drop only the clear-text material.
	is.NotNil(user.EncryptedPrivateKey)
	is.Nil(user.ClearPrivateKey)
	is.NotNil(user.EncryptedPassphrase)
	is.Nil(user.ClearPassphrase)
	err = user.Passphrase(oldpw)
	is.NoErr(err)
	// Unlocking again restores the clear-text fields.
	is.NotNil(user.EncryptedPrivateKey)
	is.NotNil(user.ClearPrivateKey)
	is.NotNil(user.EncryptedPassphrase)
	is.NotNil(user.ClearPassphrase)
}
|
/*
Copyright 2016 Wenhui Shen <www.webx.top>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tplfunc
import (
"fmt"
"html/template"
"log"
"math"
"net/url"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/webx-top/captcha"
"github.com/webx-top/com"
)
// New returns a fresh template.FuncMap pre-populated with every helper
// registered in TplFuncMap, so callers can extend it without mutating
// the shared package-level map.
func New() (r template.FuncMap) {
	r = make(template.FuncMap, len(TplFuncMap))
	for name, fn := range TplFuncMap {
		r[name] = fn
	}
	return r
}
// TplFuncMap is the package-wide registry of helpers exposed to
// html/template. Use New() to obtain a private copy.
var TplFuncMap template.FuncMap = template.FuncMap{
	// ======================
	// time
	// ======================
	"Now":             Now,
	"ElapsedMemory":   com.ElapsedMemory,  // memory consumption
	"TotalRunTime":    com.TotalRunTime,   // uptime since the service started
	"CaptchaForm":     CaptchaForm,        // captcha image form field
	"FormatByte":      com.FormatByte,     // byte count -> human-readable size
	"FriendlyTime":    FriendlyTime,
	"FormatPastTime":  com.FormatPastTime, // time elapsed since a past moment
	"DateFormat":      com.DateFormat,
	"DateFormatShort": com.DateFormatShort,
	"Ts2time":         TsToTime, // numeric timestamp -> time.Time
	"Ts2date":         TsToDate, // numeric timestamp -> date string
	// ======================
	// compare
	// ======================
	"Eq":       Eq,
	"Add":      Add,
	"Sub":      Sub,
	"Div":      Div,
	"Mul":      Mul,
	"IsNil":    IsNil,
	"IsEmpty":  IsEmpty,
	"NotEmpty": NotEmpty,
	"IsNaN":    IsNaN,
	"IsInf":    IsInf,
	// ======================
	// conversion type
	// ======================
	"Html":           ToHTML,
	"Js":             ToJS,
	"Css":            ToCSS,
	"ToJS":           ToJS,
	"ToCSS":          ToCSS,
	"ToURL":          ToURL,
	"ToHTML":         ToHTML,
	"ToHTMLAttr":     ToHTMLAttr,
	"ToHTMLAttrs":    ToHTMLAttrs,
	"ToStrSlice":     ToStrSlice,
	"ToDuration":     ToDuration,
	"Str":            com.Str,
	"Int":            com.Int,
	"Int32":          com.Int32,
	"Int64":          com.Int64,
	"Uint":           com.Uint,
	"Uint32":         com.Uint32,
	"Uint64":         com.Uint64,
	"Float32":        com.Float32,
	"Float64":        com.Float64,
	"ToFloat64":      ToFloat64,
	"ToFixed":        ToFixed,
	"Math":           Math,
	"NumberFormat":   NumberFormat,
	"DurationFormat": DurationFormat,
	// ======================
	// string
	// ======================
	"Contains":       strings.Contains,
	"HasPrefix":      strings.HasPrefix,
	"HasSuffix":      strings.HasSuffix,
	"Trim":           strings.TrimSpace,
	"TrimLeft":       strings.TrimLeft,
	"TrimRight":      strings.TrimRight,
	"TrimPrefix":     strings.TrimPrefix,
	"TrimSuffix":     strings.TrimSuffix,
	"ToLower":        strings.ToLower,
	"ToUpper":        strings.ToUpper,
	"LowerCaseFirst": com.LowerCaseFirst,
	"CamelCase":      com.CamelCase,
	"PascalCase":     com.PascalCase,
	"SnakeCase":      com.SnakeCase,
	"Reverse":        com.Reverse,
	"Ext":            filepath.Ext,
	"InExt":          InExt,
	"Concat":         Concat,
	"Replace":        strings.Replace, //strings.Replace(s, old, new, n)
	"Split":          strings.Split,
	"Join":           strings.Join,
	"Substr":         com.Substr,
	"StripTags":      com.StripTags,
	"Nl2br":          NlToBr, // replace \n with <br>
	"AddSuffix":      AddSuffix,
	// ======================
	// encode & decode
	// ======================
	"JsonEncode":   JsonEncode,
	"JsonDecode":   JsonDecode,
	"UrlEncode":    com.UrlEncode,
	"UrlDecode":    com.UrlDecode,
	"Base64Encode": com.Base64Encode,
	"Base64Decode": com.Base64Decode,
	// ======================
	// map & slice
	// ======================
	"InSlice":        com.InSlice,
	"InSlicex":       com.InSliceIface,
	"Set":            Set,
	"Append":         Append,
	"InStrSlice":     InStrSlice,
	"SearchStrSlice": SearchStrSlice,
	"URLValues":      URLValues,
	"ToSlice":        ToSlice,
	"StrToSlice":     StrToSlice,
	"StrToStrSlice":  StrToStrSlice,
	// ======================
	// regexp
	// ======================
	"Regexp":      regexp.MustCompile,
	"RegexpPOSIX": regexp.MustCompilePOSIX,
	// ======================
	// other
	// ======================
	"Ignore":  Ignore,
	"Default": Default,
}
// JsonEncode renders s as a JSON string via com.SetJSON.
// Encoding errors are deliberately discarded (best-effort for templates).
func JsonEncode(s interface{}) string {
	r, _ := com.SetJSON(s)
	return r
}

// JsonDecode parses the JSON document s into a generic map.
// Decode errors are logged and an empty map is returned.
func JsonDecode(s string) map[string]interface{} {
	r := map[string]interface{}{}
	e := com.GetJSON(&s, &r)
	if e != nil {
		log.Println(e)
	}
	return r
}
// Ignore discards its argument and renders nothing; it lets a template
// evaluate an expression purely for its side effects.
func Ignore(_ interface{}) interface{} {
	return nil
}
// URLValues builds url.Values from an alternating key, value argument
// list. Values are stringified with fmt.Sprint. If the list has an odd
// length, the trailing key is added with an empty value.
func URLValues(values ...interface{}) url.Values {
	v := url.Values{}
	var k string
	for i, j := 0, len(values); i < j; i++ {
		if i%2 == 0 {
			k = fmt.Sprint(values[i])
			continue
		}
		v.Add(k, fmt.Sprint(values[i]))
		k = `` // mark the key as consumed for the trailing-key check below
	}
	if len(k) > 0 {
		// Odd argument count: the final key has no value.
		// (The dead `k = ""` re-assignment that followed has been removed.)
		v.Add(k, ``)
	}
	return v
}
// ToStrSlice collects its string arguments into a []string
// (templates cannot write slice literals directly).
func ToStrSlice(s ...string) []string {
	return s
}

// ToSlice collects its arguments into a []interface{}.
func ToSlice(s ...interface{}) []interface{} {
	return s
}
// StrToStrSlice splits s around sep into a []string. Equivalent to
// strings.Split — the previous variadic round-trip through ToStrSlice
// was a no-op and has been dropped.
func StrToStrSlice(s string, sep string) []string {
	return strings.Split(s, sep)
}
// StrToSlice splits s around sep and boxes each piece into an
// interface{} so templates can treat the result as a generic list.
func StrToSlice(s string, sep string) []interface{} {
	parts := strings.Split(s, sep)
	boxed := make([]interface{}, len(parts))
	for idx := range parts {
		boxed[idx] = parts[idx]
	}
	return boxed
}
// Concat joins all of its arguments into one string with no separator.
func Concat(s ...string) string {
	var b strings.Builder
	for _, piece := range s {
		b.WriteString(piece)
	}
	return b.String()
}
// InExt reports whether fileName's extension (including the dot)
// matches any of exts, case-insensitively.
func InExt(fileName string, exts ...string) bool {
	want := strings.ToLower(filepath.Ext(fileName))
	for _, candidate := range exts {
		if strings.ToLower(candidate) == want {
			return true
		}
	}
	return false
}
// Default returns defaultV when v is "empty" — nil, an empty string, a
// numeric zero, or any other value whose com.Str form is empty —
// otherwise it returns v unchanged.
//
// Fix: the original grouped all numeric types into one case, so `val`
// kept its interface{} type and `val == 0` compared against an int 0 —
// only a plain int zero matched; uint8(0), int64(0), float32(0), etc.
// were never replaced by the default. Each type now gets its own case.
func Default(defaultV interface{}, v interface{}) interface{} {
	switch val := v.(type) {
	case nil:
		return defaultV
	case string:
		if len(val) == 0 {
			return defaultV
		}
	case uint8:
		if val == 0 {
			return defaultV
		}
	case int8:
		if val == 0 {
			return defaultV
		}
	case uint:
		if val == 0 {
			return defaultV
		}
	case int:
		if val == 0 {
			return defaultV
		}
	case uint32:
		if val == 0 {
			return defaultV
		}
	case int32:
		if val == 0 {
			return defaultV
		}
	case int64:
		if val == 0 {
			return defaultV
		}
	case uint64:
		if val == 0 {
			return defaultV
		}
	case float32:
		if val == 0 {
			return defaultV
		}
	case float64:
		if val == 0 {
			return defaultV
		}
	default:
		if len(com.Str(v)) == 0 {
			return defaultV
		}
	}
	return v
}
// Set stores value under key in renderArgs and renders nothing,
// letting templates stash intermediate values.
func Set(renderArgs map[string]interface{}, key string, value interface{}) string {
	renderArgs[key] = value
	return ``
}
// Append appends value to the []interface{} stored at renderArgs[key],
// creating the slice on first use. Renders nothing.
//
// Fix: the unchecked type assertion panicked (aborting the whole
// template render) when a non-slice value was already stored under key;
// such a value is now wrapped into a slice instead.
func Append(renderArgs map[string]interface{}, key string, value interface{}) string {
	if renderArgs[key] == nil {
		renderArgs[key] = []interface{}{value}
		return ``
	}
	if existing, ok := renderArgs[key].([]interface{}); ok {
		renderArgs[key] = append(existing, value)
	} else {
		renderArgs[key] = []interface{}{renderArgs[key], value}
	}
	return ``
}
//NlToBr Replaces newlines with <br />. The input is HTML-escaped first
// (see Nl2br); the result is template.HTML so the <br /> tags render.
func NlToBr(text string) template.HTML {
	return template.HTML(Nl2br(text))
}
//CaptchaForm renders a captcha <img> plus a hidden captchaId input.
//
// Optional positional args (higher cases fall through to lower ones):
//   args[0]: string = element id prefix; template.HTML = custom format
//            string; template.JS = custom onerror script
//   args[1]: any non-nil value — stringified and used as the format
//   args[2]: template.JS = custom onerror script; string = the confirm
//            message shown when the captcha image fails to load
func CaptchaForm(args ...interface{}) template.HTML {
	id := "captcha"
	// User-facing text (Chinese): "The page captcha has expired; the page
	// must be re-requested. Refresh this page?" — runtime string, unchanged.
	msg := "页面验证码已经失效,必须重新请求当前页面。确定要刷新本页面吗?"
	onErr := "if(this.src.indexOf('?reload=')!=-1 && confirm('%s')) window.location.reload();"
	format := `<img id="%[2]sImage" src="/captcha/%[1]s.png" alt="Captcha image" onclick="this.src=this.src.split('?')[0]+'?reload='+Math.random();" onerror="%[3]s" style="cursor:pointer" /><input type="hidden" name="captchaId" id="%[2]sId" value="%[1]s" />`
	var customOnErr bool
	switch len(args) {
	case 3:
		switch v := args[2].(type) {
		case template.JS:
			onErr = string(v)
			customOnErr = true
		case string:
			msg = v
		}
		fallthrough
	case 2:
		if args[1] != nil {
			v := fmt.Sprint(args[1])
			format = v
		}
		fallthrough
	case 1:
		switch v := args[0].(type) {
		case template.JS:
			onErr = string(v)
			customOnErr = true
		case template.HTML:
			format = string(v)
		case string:
			id = v
		}
	}
	cid := captcha.New()
	// Only inject the confirm message when the default onerror is used.
	if !customOnErr {
		onErr = fmt.Sprintf(onErr, msg)
	}
	return template.HTML(fmt.Sprintf(format, cid, id, onErr))
}
//CaptchaVerify reports whether captchaSolution matches the captcha
//identified by idGet("captchaId").
func CaptchaVerify(captchaSolution string, idGet func(string) string) bool {
	id := idGet("captchaId")
	// Simplified from an `if !x { return false }; return true` chain.
	return captcha.VerifyString(id, captchaSolution)
}
//Nl2br HTML-escapes text, then replaces newlines with <br />.
func Nl2br(text string) string {
	return com.Nl2br(template.HTMLEscapeString(text))
}
// IsNil reports whether a is the untyped nil interface. Note that an
// interface holding a typed nil pointer is NOT nil here (this matches
// the original type-switch behavior).
func IsNil(a interface{}) bool {
	return a == nil
}
// interface2Int64 converts any built-in integer type to int64.
// The second result reports whether value was an integer at all.
// Unsigned values above math.MaxInt64 wrap (accepted original behavior).
func interface2Int64(value interface{}) (int64, bool) {
	switch v := value.(type) {
	case uint:
		return int64(v), true
	case uint8:
		return int64(v), true
	case uint16:
		return int64(v), true
	case uint32:
		return int64(v), true
	case uint64:
		return int64(v), true
	case int:
		return int64(v), true
	case int8:
		return int64(v), true
	case int16:
		return int64(v), true
	case int32:
		return int64(v), true
	case int64:
		return v, true
	default:
		return 0, false
	}
}
// interface2Float64 converts float32/float64 to float64.
// The second result reports whether value was a float.
func interface2Float64(value interface{}) (float64, bool) {
	switch v := value.(type) {
	case float32:
		return float64(v), true
	case float64:
		return v, true
	default:
		return 0, false
	}
}
// ToFloat64 coerces integers and floats to float64 directly; anything
// else (strings, etc.) is delegated to com.Float64.
func ToFloat64(value interface{}) float64 {
	if v, ok := interface2Int64(value); ok {
		return float64(v)
	}
	if v, ok := interface2Float64(value); ok {
		return v
	}
	return com.Float64(value)
}
// Add returns left + right. When both operands are integers the result
// is an int64; otherwise the operands are combined as float64.
//
// Fix: the original reused one `isInt` flag for both operands, so the
// left operand's flag was clobbered by the right's — Add(2.5, 3)
// returned int64(3), silently dropping 2.5. Each operand now tracks its
// own integer-ness.
func Add(left interface{}, right interface{}) interface{} {
	lInt, lIsInt := interface2Int64(left)
	var lFloat float64
	if !lIsInt {
		lFloat, _ = interface2Float64(left)
	}
	rInt, rIsInt := interface2Int64(right)
	var rFloat float64
	if !rIsInt {
		rFloat, _ = interface2Float64(right)
	}
	if lIsInt && rIsInt {
		return lInt + rInt
	}
	// Mixed or float operands: the int halves are zero when unset.
	return lFloat + rFloat + float64(lInt+rInt)
}
// Div returns left / right as float64. Division by zero follows IEEE 754
// (±Inf or NaN) rather than panicking.
func Div(left interface{}, right interface{}) interface{} {
	return ToFloat64(left) / ToFloat64(right)
}

// Mul returns left * right as float64.
func Mul(left interface{}, right interface{}) interface{} {
	return ToFloat64(left) * ToFloat64(right)
}
// Math dispatches to the standard math package by operation name.
// Unary operations use args[0]; binary operations (mod, atan2, copysign,
// dim, max, min, pow, add, sub, mul, div) need two args and return
// float64(0) when fewer are supplied. Unknown operations return nil.
func Math(op string, args ...interface{}) interface{} {
	length := len(args)
	if length < 1 {
		return float64(0)
	}
	switch op {
	case `mod`: // modulo
		if length < 2 {
			return float64(0)
		}
		return math.Mod(ToFloat64(args[0]), ToFloat64(args[1]))
	case `abs`:
		return math.Abs(ToFloat64(args[0]))
	case `acos`:
		return math.Acos(ToFloat64(args[0]))
	case `acosh`:
		return math.Acosh(ToFloat64(args[0]))
	case `asin`:
		return math.Asin(ToFloat64(args[0]))
	case `asinh`:
		return math.Asinh(ToFloat64(args[0]))
	case `atan`:
		return math.Atan(ToFloat64(args[0]))
	case `atan2`:
		if length < 2 {
			return float64(0)
		}
		return math.Atan2(ToFloat64(args[0]), ToFloat64(args[1]))
	case `atanh`:
		return math.Atanh(ToFloat64(args[0]))
	case `cbrt`:
		return math.Cbrt(ToFloat64(args[0]))
	case `ceil`:
		return math.Ceil(ToFloat64(args[0]))
	case `copysign`:
		if length < 2 {
			return float64(0)
		}
		return math.Copysign(ToFloat64(args[0]), ToFloat64(args[1]))
	case `cos`:
		return math.Cos(ToFloat64(args[0]))
	case `cosh`:
		return math.Cosh(ToFloat64(args[0]))
	case `dim`:
		if length < 2 {
			return float64(0)
		}
		return math.Dim(ToFloat64(args[0]), ToFloat64(args[1]))
	case `erf`:
		return math.Erf(ToFloat64(args[0]))
	case `erfc`:
		return math.Erfc(ToFloat64(args[0]))
	case `exp`:
		return math.Exp(ToFloat64(args[0]))
	case `exp2`:
		return math.Exp2(ToFloat64(args[0]))
	case `floor`:
		return math.Floor(ToFloat64(args[0]))
	case `max`:
		if length < 2 {
			return float64(0)
		}
		return math.Max(ToFloat64(args[0]), ToFloat64(args[1]))
	case `min`:
		if length < 2 {
			return float64(0)
		}
		return math.Min(ToFloat64(args[0]), ToFloat64(args[1]))
	case `pow`: // power
		if length < 2 {
			return float64(0)
		}
		return math.Pow(ToFloat64(args[0]), ToFloat64(args[1]))
	case `sqrt`: // square root
		return math.Sqrt(ToFloat64(args[0]))
	case `sin`:
		return math.Sin(ToFloat64(args[0]))
	case `log`:
		return math.Log(ToFloat64(args[0]))
	case `log2`:
		return math.Log2(ToFloat64(args[0]))
	case `log10`:
		return math.Log10(ToFloat64(args[0]))
	case `tan`:
		return math.Tan(ToFloat64(args[0]))
	case `tanh`:
		return math.Tanh(ToFloat64(args[0]))
	case `add`: // addition
		if length < 2 {
			return float64(0)
		}
		return Add(ToFloat64(args[0]), ToFloat64(args[1]))
	case `sub`: // subtraction
		if length < 2 {
			return float64(0)
		}
		return Sub(ToFloat64(args[0]), ToFloat64(args[1]))
	case `mul`: // multiplication
		if length < 2 {
			return float64(0)
		}
		return Mul(ToFloat64(args[0]), ToFloat64(args[1]))
	case `div`: // division
		if length < 2 {
			return float64(0)
		}
		return Div(ToFloat64(args[0]), ToFloat64(args[1]))
	}
	return nil
}
// IsNaN reports whether v converts to an IEEE 754 NaN.
func IsNaN(v interface{}) bool {
	return math.IsNaN(ToFloat64(v))
}

// IsInf reports whether v is infinite with sign s (see math.IsInf).
func IsInf(v interface{}, s interface{}) bool {
	return math.IsInf(ToFloat64(v), com.Int(s))
}
// Sub returns left - right. When both operands are integers the result
// is an int64; otherwise the operands are combined as float64.
//
// Fix: like Add, the original reused one `isInt` flag for both
// operands, so Sub(2.5, 3) returned int64(-3) instead of -0.5. Each
// operand now tracks its own integer-ness.
func Sub(left interface{}, right interface{}) interface{} {
	lInt, lIsInt := interface2Int64(left)
	var lFloat float64
	if !lIsInt {
		lFloat, _ = interface2Float64(left)
	}
	rInt, rIsInt := interface2Int64(right)
	var rFloat float64
	if !rIsInt {
		rFloat, _ = interface2Float64(right)
	}
	if lIsInt && rIsInt {
		return lInt - rInt
	}
	// Mixed or float operands: exactly one of the int/float halves of
	// each operand is non-zero.
	return (lFloat + float64(lInt)) - (rFloat + float64(rInt))
}
// ToFixed formats value with `precision` digits after the decimal point.
func ToFixed(value interface{}, precision interface{}) string {
	return fmt.Sprintf("%.*f", com.Int(precision), ToFloat64(value))
}

// Now returns the current local time.
func Now() time.Time {
	return time.Now()
}
// Eq reports whether left and right render to the same default string
// form ("%v"). Two nils are equal; nil never equals a non-nil value.
func Eq(left interface{}, right interface{}) bool {
	if left == nil || right == nil {
		return left == nil && right == nil
	}
	return fmt.Sprint(left) == fmt.Sprint(right)
}
// ToHTML marks raw as trusted HTML, bypassing html/template escaping.
// Only use with content that is not attacker-controlled.
func ToHTML(raw string) template.HTML {
	return template.HTML(raw)
}

// ToHTMLAttr marks raw as a trusted HTML attribute fragment.
func ToHTMLAttr(raw string) template.HTMLAttr {
	return template.HTMLAttr(raw)
}

// ToHTMLAttrs converts a string-keyed map into one keyed by trusted
// template.HTMLAttr, preserving the values.
func ToHTMLAttrs(raw map[string]interface{}) (r map[template.HTMLAttr]interface{}) {
	r = make(map[template.HTMLAttr]interface{})
	for k, v := range raw {
		r[ToHTMLAttr(k)] = v
	}
	return
}

// ToJS marks raw as trusted JavaScript.
func ToJS(raw string) template.JS {
	return template.JS(raw)
}

// ToCSS marks raw as trusted CSS.
func ToCSS(raw string) template.CSS {
	return template.CSS(raw)
}

// ToURL marks raw as a trusted URL.
func ToURL(raw string) template.URL {
	return template.URL(raw)
}
// AddSuffix inserts suffix immediately before the last occurrence of a
// marker in s (default marker "."), e.g. AddSuffix("a.png", "_s") ->
// "a_s.png". An explicit empty marker appends the suffix; if the marker
// is absent, s is returned unchanged.
func AddSuffix(s string, suffix string, args ...string) string {
	marker := `.`
	if len(args) > 0 {
		marker = args[0]
		if marker == `` {
			return s + suffix
		}
	}
	pos := strings.LastIndex(s, marker)
	if pos < 0 {
		return s
	}
	return s[:pos] + suffix + s[pos:]
}
// IsEmpty reports whether a is considered empty: nil, an empty string,
// an empty []interface{}, or any other value whose "%v" rendering is
// "<nil>", "" or "[]".
func IsEmpty(a interface{}) bool {
	if a == nil {
		return true
	}
	switch v := a.(type) {
	case string:
		return v == ``
	case []interface{}:
		return len(v) == 0
	}
	// Fall back to the rendered form for every other type.
	repr := fmt.Sprintf(`%v`, a)
	return repr == `<nil>` || repr == `` || repr == `[]`
}

// NotEmpty is the negation of IsEmpty.
func NotEmpty(a interface{}) bool {
	return !IsEmpty(a)
}
// InStrSlice reports whether value occurs in values.
func InStrSlice(values []string, value string) bool {
	for i := 0; i < len(values); i++ {
		if values[i] == value {
			return true
		}
	}
	return false
}

// SearchStrSlice returns the index of the first occurrence of value in
// values, or -1 when absent.
func SearchStrSlice(values []string, value string) int {
	for i := 0; i < len(values); i++ {
		if values[i] == value {
			return i
		}
	}
	return -1
}
// DurationFormat converts t to a duration (see ToDuration) and wraps it
// in a locale-aware com.Durafmt for human-readable display.
func DurationFormat(lang interface{}, t interface{}, args ...string) *com.Durafmt {
	duration := ToDuration(t, args...)
	return com.ParseDuration(duration, lang)
}
// ToDuration interprets t as a time.Duration. An optional unit argument
// scales numeric input: "ns", "us", "ms", "s" (default), "m", "h".
// time.Duration values are returned unchanged (the unit is ignored);
// anything non-numeric is coerced through com.Int64 first.
func ToDuration(t interface{}, args ...string) time.Duration {
	td := time.Second
	if len(args) > 0 {
		switch args[0] {
		case `ns`:
			td = time.Nanosecond
		case `us`:
			td = time.Microsecond
		case `s`:
			td = time.Second
		case `ms`:
			td = time.Millisecond
		case `h`:
			td = time.Hour
		case `m`:
			td = time.Minute
		}
	}
	switch v := t.(type) {
	case time.Duration:
		return v
	case int64:
		td = time.Duration(v) * td
	case int:
		td = time.Duration(v) * td
	case uint:
		td = time.Duration(v) * td
	case int32:
		td = time.Duration(v) * td
	case uint32:
		td = time.Duration(v) * td
	case uint64:
		td = time.Duration(v) * td
	default:
		td = time.Duration(com.Int64(t)) * td
	}
	return td
}
// FriendlyTime renders t as a human-friendly duration string via
// com.FriendlyTime. Numeric input is treated as nanoseconds (raw
// time.Duration units), unlike ToDuration which defaults to seconds.
func FriendlyTime(t interface{}, args ...string) string {
	var td time.Duration
	switch v := t.(type) {
	case time.Duration:
		td = v
	case int64:
		td = time.Duration(v)
	case int:
		td = time.Duration(v)
	case uint:
		td = time.Duration(v)
	case int32:
		td = time.Duration(v)
	case uint32:
		td = time.Duration(v)
	case uint64:
		td = time.Duration(v)
	default:
		td = time.Duration(com.Int64(t))
	}
	return com.FriendlyTime(td, args...)
}
// TsToTime converts a Unix-seconds timestamp to time.Time
// (template alias "Ts2time").
func TsToTime(timestamp interface{}) time.Time {
	return TimestampToTime(timestamp)
}

// TsToDate formats a Unix-seconds timestamp with the given Go reference
// layout; a zero time renders as the empty string (template alias "Ts2date").
func TsToDate(format string, timestamp interface{}) string {
	t := TimestampToTime(timestamp)
	if t.IsZero() {
		return ``
	}
	return t.Format(format)
}
// TimestampToTime converts a numeric timestamp (seconds since the Unix
// epoch) into a time.Time. Non-integer input is rendered with fmt and
// parsed as a base-10 integer; a parse failure is logged and yields the
// epoch itself.
func TimestampToTime(timestamp interface{}) time.Time {
	var secs int64
	switch v := timestamp.(type) {
	case int64:
		secs = v
	case int:
		secs = int64(v)
	case int32:
		secs = int64(v)
	case uint:
		secs = int64(v)
	case uint32:
		secs = int64(v)
	case uint64:
		secs = int64(v)
	default:
		parsed, err := strconv.ParseInt(fmt.Sprint(timestamp), 10, 64)
		if err != nil {
			log.Println(err)
		}
		secs = parsed
	}
	return time.Unix(secs, 0)
}
// NumberFormat renders number with `precision` decimal places and
// inserts a thousands delimiter (default ",") into the integer part.
// Pass an explicit empty delimiter to disable grouping.
//
// Fixes: with precision == 0 the old code assumed a decimal point was
// present and produced e.g. "1234" ungrouped (treating the last digit
// as the fraction); a leading minus sign was also grouped as if it were
// a digit, yielding strings like "-,123.45".
func NumberFormat(number interface{}, precision int, delim ...string) string {
	r := fmt.Sprintf(`%.*f`, precision, ToFloat64(number))
	d := `,`
	if len(delim) > 0 {
		d = delim[0]
		if len(d) == 0 {
			return r
		}
	}
	// End of the integer digits: the whole string, or everything before
	// the decimal point when a fraction was printed.
	intEnd := len(r)
	if precision > 0 {
		intEnd = len(r) - precision - 1
	}
	// Keep a leading minus sign out of the grouping.
	signLen := 0
	if strings.HasPrefix(r, `-`) {
		signLen = 1
	}
	digits := r[signLen:intEnd]
	if len(digits) <= 3 {
		return r
	}
	groups := make([]string, 0, (len(digits)+2)/3)
	head := len(digits) % 3
	if head > 0 {
		groups = append(groups, digits[:head])
	}
	for i := head; i < len(digits); i += 3 {
		groups = append(groups, digits[i:i+3])
	}
	return r[:signLen] + strings.Join(groups, d) + r[intEnd:]
}
Removed the template function StrToStrSlice; use Split instead.
/*
Copyright 2016 Wenhui Shen <www.webx.top>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tplfunc
import (
"fmt"
"html/template"
"log"
"math"
"net/url"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/webx-top/captcha"
"github.com/webx-top/com"
)
func New() (r template.FuncMap) {
r = template.FuncMap{}
for name, function := range TplFuncMap {
r[name] = function
}
return
}
var TplFuncMap template.FuncMap = template.FuncMap{
// ======================
// time
// ======================
"Now": Now,
"ElapsedMemory": com.ElapsedMemory, //内存消耗
"TotalRunTime": com.TotalRunTime, //运行时长(从启动服务时算起)
"CaptchaForm": CaptchaForm, //验证码图片
"FormatByte": com.FormatByte, //字节转为适合理解的格式
"FriendlyTime": FriendlyTime,
"FormatPastTime": com.FormatPastTime, //以前距离现在多长时间
"DateFormat": com.DateFormat,
"DateFormatShort": com.DateFormatShort,
"Ts2time": TsToTime, // 时间戳数字转time.Time
"Ts2date": TsToDate, // 时间戳数字转日期字符串
// ======================
// compare
// ======================
"Eq": Eq,
"Add": Add,
"Sub": Sub,
"Div": Div,
"Mul": Mul,
"IsNil": IsNil,
"IsEmpty": IsEmpty,
"NotEmpty": NotEmpty,
"IsNaN": IsNaN,
"IsInf": IsInf,
// ======================
// conversion type
// ======================
"Html": ToHTML,
"Js": ToJS,
"Css": ToCSS,
"ToJS": ToJS,
"ToCSS": ToCSS,
"ToURL": ToURL,
"ToHTML": ToHTML,
"ToHTMLAttr": ToHTMLAttr,
"ToHTMLAttrs": ToHTMLAttrs,
"ToStrSlice": ToStrSlice,
"ToDuration": ToDuration,
"Str": com.Str,
"Int": com.Int,
"Int32": com.Int32,
"Int64": com.Int64,
"Uint": com.Uint,
"Uint32": com.Uint32,
"Uint64": com.Uint64,
"Float32": com.Float32,
"Float64": com.Float64,
"ToFloat64": ToFloat64,
"ToFixed": ToFixed,
"Math": Math,
"NumberFormat": NumberFormat,
"DurationFormat": DurationFormat,
// ======================
// string
// ======================
"Contains": strings.Contains,
"HasPrefix": strings.HasPrefix,
"HasSuffix": strings.HasSuffix,
"Trim": strings.TrimSpace,
"TrimLeft": strings.TrimLeft,
"TrimRight": strings.TrimRight,
"TrimPrefix": strings.TrimPrefix,
"TrimSuffix": strings.TrimSuffix,
"ToLower": strings.ToLower,
"ToUpper": strings.ToUpper,
"LowerCaseFirst": com.LowerCaseFirst,
"CamelCase": com.CamelCase,
"PascalCase": com.PascalCase,
"SnakeCase": com.SnakeCase,
"Reverse": com.Reverse,
"Ext": filepath.Ext,
"InExt": InExt,
"Concat": Concat,
"Replace": strings.Replace, //strings.Replace(s, old, new, n)
"Split": strings.Split,
"Join": strings.Join,
"Substr": com.Substr,
"StripTags": com.StripTags,
"Nl2br": NlToBr, // \n替换为<br>
"AddSuffix": AddSuffix,
// ======================
// encode & decode
// ======================
"JsonEncode": JsonEncode,
"JsonDecode": JsonDecode,
"UrlEncode": com.UrlEncode,
"UrlDecode": com.UrlDecode,
"Base64Encode": com.Base64Encode,
"Base64Decode": com.Base64Decode,
// ======================
// map & slice
// ======================
"InSlice": com.InSlice,
"InSlicex": com.InSliceIface,
"Set": Set,
"Append": Append,
"InStrSlice": InStrSlice,
"SearchStrSlice": SearchStrSlice,
"URLValues": URLValues,
"ToSlice": ToSlice,
"StrToSlice": StrToSlice,
// ======================
// regexp
// ======================
"Regexp": regexp.MustCompile,
"RegexpPOSIX": regexp.MustCompilePOSIX,
// ======================
// other
// ======================
"Ignore": Ignore,
"Default": Default,
}
func JsonEncode(s interface{}) string {
r, _ := com.SetJSON(s)
return r
}
func JsonDecode(s string) map[string]interface{} {
r := map[string]interface{}{}
e := com.GetJSON(&s, &r)
if e != nil {
log.Println(e)
}
return r
}
func Ignore(_ interface{}) interface{} {
return nil
}
// URLValues builds url.Values from an alternating key, value argument
// list. Values are stringified with fmt.Sprint. If the list has an odd
// length, the trailing key is added with an empty value.
func URLValues(values ...interface{}) url.Values {
	v := url.Values{}
	var k string
	for i, j := 0, len(values); i < j; i++ {
		if i%2 == 0 {
			k = fmt.Sprint(values[i])
			continue
		}
		v.Add(k, fmt.Sprint(values[i]))
		k = `` // mark the key as consumed for the trailing-key check below
	}
	if len(k) > 0 {
		// Odd argument count: the final key has no value.
		// (The dead `k = ""` re-assignment that followed has been removed.)
		v.Add(k, ``)
	}
	return v
}
func ToStrSlice(s ...string) []string {
return s
}
func ToSlice(s ...interface{}) []interface{} {
return s
}
func StrToSlice(s string, sep string) []interface{} {
ss := strings.Split(s, sep)
r := make([]interface{}, len(ss))
for i, s := range ss {
r[i] = s
}
return r
}
func Concat(s ...string) string {
return strings.Join(s, ``)
}
func InExt(fileName string, exts ...string) bool {
ext := filepath.Ext(fileName)
ext = strings.ToLower(ext)
for _, _ext := range exts {
if ext == strings.ToLower(_ext) {
return true
}
}
return false
}
// Default returns defaultV when v is "empty" — nil, an empty string, a
// numeric zero, or any other value whose com.Str form is empty —
// otherwise it returns v unchanged.
//
// Fix: the original grouped all numeric types into one case, so `val`
// kept its interface{} type and `val == 0` compared against an int 0 —
// only a plain int zero matched; uint8(0), int64(0), float32(0), etc.
// were never replaced by the default. Each type now gets its own case.
func Default(defaultV interface{}, v interface{}) interface{} {
	switch val := v.(type) {
	case nil:
		return defaultV
	case string:
		if len(val) == 0 {
			return defaultV
		}
	case uint8:
		if val == 0 {
			return defaultV
		}
	case int8:
		if val == 0 {
			return defaultV
		}
	case uint:
		if val == 0 {
			return defaultV
		}
	case int:
		if val == 0 {
			return defaultV
		}
	case uint32:
		if val == 0 {
			return defaultV
		}
	case int32:
		if val == 0 {
			return defaultV
		}
	case int64:
		if val == 0 {
			return defaultV
		}
	case uint64:
		if val == 0 {
			return defaultV
		}
	case float32:
		if val == 0 {
			return defaultV
		}
	case float64:
		if val == 0 {
			return defaultV
		}
	default:
		if len(com.Str(v)) == 0 {
			return defaultV
		}
	}
	return v
}
func Set(renderArgs map[string]interface{}, key string, value interface{}) string {
renderArgs[key] = value
return ``
}
// Append appends value to the []interface{} stored at renderArgs[key],
// creating the slice on first use. Renders nothing.
//
// Fix: the unchecked type assertion panicked (aborting the whole
// template render) when a non-slice value was already stored under key;
// such a value is now wrapped into a slice instead.
func Append(renderArgs map[string]interface{}, key string, value interface{}) string {
	if renderArgs[key] == nil {
		renderArgs[key] = []interface{}{value}
		return ``
	}
	if existing, ok := renderArgs[key].([]interface{}); ok {
		renderArgs[key] = append(existing, value)
	} else {
		renderArgs[key] = []interface{}{renderArgs[key], value}
	}
	return ``
}
//NlToBr Replaces newlines with <br />
func NlToBr(text string) template.HTML {
return template.HTML(Nl2br(text))
}
//CaptchaForm 验证码表单域
func CaptchaForm(args ...interface{}) template.HTML {
id := "captcha"
msg := "页面验证码已经失效,必须重新请求当前页面。确定要刷新本页面吗?"
onErr := "if(this.src.indexOf('?reload=')!=-1 && confirm('%s')) window.location.reload();"
format := `<img id="%[2]sImage" src="/captcha/%[1]s.png" alt="Captcha image" onclick="this.src=this.src.split('?')[0]+'?reload='+Math.random();" onerror="%[3]s" style="cursor:pointer" /><input type="hidden" name="captchaId" id="%[2]sId" value="%[1]s" />`
var customOnErr bool
switch len(args) {
case 3:
switch v := args[2].(type) {
case template.JS:
onErr = string(v)
customOnErr = true
case string:
msg = v
}
fallthrough
case 2:
if args[1] != nil {
v := fmt.Sprint(args[1])
format = v
}
fallthrough
case 1:
switch v := args[0].(type) {
case template.JS:
onErr = string(v)
customOnErr = true
case template.HTML:
format = string(v)
case string:
id = v
}
}
cid := captcha.New()
if !customOnErr {
onErr = fmt.Sprintf(onErr, msg)
}
return template.HTML(fmt.Sprintf(format, cid, id, onErr))
}
//CaptchaVerify reports whether captchaSolution matches the captcha
//identified by idGet("captchaId").
func CaptchaVerify(captchaSolution string, idGet func(string) string) bool {
	id := idGet("captchaId")
	// Simplified from an `if !x { return false }; return true` chain.
	return captcha.VerifyString(id, captchaSolution)
}
//Nl2br 将换行符替换为<br />
func Nl2br(text string) string {
return com.Nl2br(template.HTMLEscapeString(text))
}
func IsNil(a interface{}) bool {
switch a.(type) {
case nil:
return true
}
return false
}
func interface2Int64(value interface{}) (int64, bool) {
switch v := value.(type) {
case uint:
return int64(v), true
case uint8:
return int64(v), true
case uint16:
return int64(v), true
case uint32:
return int64(v), true
case uint64:
return int64(v), true
case int:
return int64(v), true
case int8:
return int64(v), true
case int16:
return int64(v), true
case int32:
return int64(v), true
case int64:
return v, true
default:
return 0, false
}
}
func interface2Float64(value interface{}) (float64, bool) {
switch v := value.(type) {
case float32:
return float64(v), true
case float64:
return v, true
default:
return 0, false
}
}
func ToFloat64(value interface{}) float64 {
if v, ok := interface2Int64(value); ok {
return float64(v)
}
if v, ok := interface2Float64(value); ok {
return v
}
return com.Float64(value)
}
// Add returns left + right. When both operands are integers the result
// is an int64; otherwise the operands are combined as float64.
//
// Fix: the original reused one `isInt` flag for both operands, so the
// left operand's flag was clobbered by the right's — Add(2.5, 3)
// returned int64(3), silently dropping 2.5. Each operand now tracks its
// own integer-ness.
func Add(left interface{}, right interface{}) interface{} {
	lInt, lIsInt := interface2Int64(left)
	var lFloat float64
	if !lIsInt {
		lFloat, _ = interface2Float64(left)
	}
	rInt, rIsInt := interface2Int64(right)
	var rFloat float64
	if !rIsInt {
		rFloat, _ = interface2Float64(right)
	}
	if lIsInt && rIsInt {
		return lInt + rInt
	}
	// Mixed or float operands: the int halves are zero when unset.
	return lFloat + rFloat + float64(lInt+rInt)
}
func Div(left interface{}, right interface{}) interface{} {
return ToFloat64(left) / ToFloat64(right)
}
func Mul(left interface{}, right interface{}) interface{} {
return ToFloat64(left) * ToFloat64(right)
}
func Math(op string, args ...interface{}) interface{} {
length := len(args)
if length < 1 {
return float64(0)
}
switch op {
case `mod`: //模
if length < 2 {
return float64(0)
}
return math.Mod(ToFloat64(args[0]), ToFloat64(args[1]))
case `abs`:
return math.Abs(ToFloat64(args[0]))
case `acos`:
return math.Acos(ToFloat64(args[0]))
case `acosh`:
return math.Acosh(ToFloat64(args[0]))
case `asin`:
return math.Asin(ToFloat64(args[0]))
case `asinh`:
return math.Asinh(ToFloat64(args[0]))
case `atan`:
return math.Atan(ToFloat64(args[0]))
case `atan2`:
if length < 2 {
return float64(0)
}
return math.Atan2(ToFloat64(args[0]), ToFloat64(args[1]))
case `atanh`:
return math.Atanh(ToFloat64(args[0]))
case `cbrt`:
return math.Cbrt(ToFloat64(args[0]))
case `ceil`:
return math.Ceil(ToFloat64(args[0]))
case `copysign`:
if length < 2 {
return float64(0)
}
return math.Copysign(ToFloat64(args[0]), ToFloat64(args[1]))
case `cos`:
return math.Cos(ToFloat64(args[0]))
case `cosh`:
return math.Cosh(ToFloat64(args[0]))
case `dim`:
if length < 2 {
return float64(0)
}
return math.Dim(ToFloat64(args[0]), ToFloat64(args[1]))
case `erf`:
return math.Erf(ToFloat64(args[0]))
case `erfc`:
return math.Erfc(ToFloat64(args[0]))
case `exp`:
return math.Exp(ToFloat64(args[0]))
case `exp2`:
return math.Exp2(ToFloat64(args[0]))
case `floor`:
return math.Floor(ToFloat64(args[0]))
case `max`:
if length < 2 {
return float64(0)
}
return math.Max(ToFloat64(args[0]), ToFloat64(args[1]))
case `min`:
if length < 2 {
return float64(0)
}
return math.Min(ToFloat64(args[0]), ToFloat64(args[1]))
case `pow`: //幂
if length < 2 {
return float64(0)
}
return math.Pow(ToFloat64(args[0]), ToFloat64(args[1]))
case `sqrt`: //平方根
return math.Sqrt(ToFloat64(args[0]))
case `sin`:
return math.Sin(ToFloat64(args[0]))
case `log`:
return math.Log(ToFloat64(args[0]))
case `log2`:
return math.Log2(ToFloat64(args[0]))
case `log10`:
return math.Log10(ToFloat64(args[0]))
case `tan`:
return math.Tan(ToFloat64(args[0]))
case `tanh`:
return math.Tanh(ToFloat64(args[0]))
case `add`: //加
if length < 2 {
return float64(0)
}
return Add(ToFloat64(args[0]), ToFloat64(args[1]))
case `sub`: //减
if length < 2 {
return float64(0)
}
return Sub(ToFloat64(args[0]), ToFloat64(args[1]))
case `mul`: //乘
if length < 2 {
return float64(0)
}
return Mul(ToFloat64(args[0]), ToFloat64(args[1]))
case `div`: //除
if length < 2 {
return float64(0)
}
return Div(ToFloat64(args[0]), ToFloat64(args[1]))
}
return nil
}
func IsNaN(v interface{}) bool {
return math.IsNaN(ToFloat64(v))
}
func IsInf(v interface{}, s interface{}) bool {
return math.IsInf(ToFloat64(v), com.Int(s))
}
// Sub returns left - right. When both operands are integers the result
// is an int64; otherwise the operands are combined as float64.
//
// Fix: like Add, the original reused one `isInt` flag for both
// operands, so Sub(2.5, 3) returned int64(-3) instead of -0.5. Each
// operand now tracks its own integer-ness.
func Sub(left interface{}, right interface{}) interface{} {
	lInt, lIsInt := interface2Int64(left)
	var lFloat float64
	if !lIsInt {
		lFloat, _ = interface2Float64(left)
	}
	rInt, rIsInt := interface2Int64(right)
	var rFloat float64
	if !rIsInt {
		rFloat, _ = interface2Float64(right)
	}
	if lIsInt && rIsInt {
		return lInt - rInt
	}
	// Mixed or float operands: exactly one of the int/float halves of
	// each operand is non-zero.
	return (lFloat + float64(lInt)) - (rFloat + float64(rInt))
}
func ToFixed(value interface{}, precision interface{}) string {
return fmt.Sprintf("%.*f", com.Int(precision), ToFloat64(value))
}
func Now() time.Time {
return time.Now()
}
func Eq(left interface{}, right interface{}) bool {
leftIsNil := (left == nil)
rightIsNil := (right == nil)
if leftIsNil || rightIsNil {
if leftIsNil && rightIsNil {
return true
}
return false
}
return fmt.Sprintf("%v", left) == fmt.Sprintf("%v", right)
}
func ToHTML(raw string) template.HTML {
return template.HTML(raw)
}
func ToHTMLAttr(raw string) template.HTMLAttr {
return template.HTMLAttr(raw)
}
func ToHTMLAttrs(raw map[string]interface{}) (r map[template.HTMLAttr]interface{}) {
r = make(map[template.HTMLAttr]interface{})
for k, v := range raw {
r[ToHTMLAttr(k)] = v
}
return
}
func ToJS(raw string) template.JS {
return template.JS(raw)
}
func ToCSS(raw string) template.CSS {
return template.CSS(raw)
}
func ToURL(raw string) template.URL {
return template.URL(raw)
}
func AddSuffix(s string, suffix string, args ...string) string {
beforeChar := `.`
if len(args) > 0 {
beforeChar = args[0]
if beforeChar == `` {
return s + suffix
}
}
p := strings.LastIndex(s, beforeChar)
if p < 0 {
return s
}
return s[0:p] + suffix + s[p:]
}
func IsEmpty(a interface{}) bool {
switch v := a.(type) {
case nil:
return true
case string:
return len(v) == 0
case []interface{}:
return len(v) < 1
default:
switch fmt.Sprintf(`%v`, a) {
case `<nil>`, ``, `[]`:
return true
}
}
return false
}
func NotEmpty(a interface{}) bool {
return !IsEmpty(a)
}
func InStrSlice(values []string, value string) bool {
for _, v := range values {
if v == value {
return true
}
}
return false
}
func SearchStrSlice(values []string, value string) int {
for i, v := range values {
if v == value {
return i
}
}
return -1
}
func DurationFormat(lang interface{}, t interface{}, args ...string) *com.Durafmt {
duration := ToDuration(t, args...)
return com.ParseDuration(duration, lang)
}
func ToDuration(t interface{}, args ...string) time.Duration {
td := time.Second
if len(args) > 0 {
switch args[0] {
case `ns`:
td = time.Nanosecond
case `us`:
td = time.Microsecond
case `s`:
td = time.Second
case `ms`:
td = time.Millisecond
case `h`:
td = time.Hour
case `m`:
td = time.Minute
}
}
switch v := t.(type) {
case time.Duration:
return v
case int64:
td = time.Duration(v) * td
case int:
td = time.Duration(v) * td
case uint:
td = time.Duration(v) * td
case int32:
td = time.Duration(v) * td
case uint32:
td = time.Duration(v) * td
case uint64:
td = time.Duration(v) * td
default:
td = time.Duration(com.Int64(t)) * td
}
return td
}
func FriendlyTime(t interface{}, args ...string) string {
var td time.Duration
switch v := t.(type) {
case time.Duration:
td = v
case int64:
td = time.Duration(v)
case int:
td = time.Duration(v)
case uint:
td = time.Duration(v)
case int32:
td = time.Duration(v)
case uint32:
td = time.Duration(v)
case uint64:
td = time.Duration(v)
default:
td = time.Duration(com.Int64(t))
}
return com.FriendlyTime(td, args...)
}
func TsToTime(timestamp interface{}) time.Time {
return TimestampToTime(timestamp)
}
func TsToDate(format string, timestamp interface{}) string {
t := TimestampToTime(timestamp)
if t.IsZero() {
return ``
}
return t.Format(format)
}
func TimestampToTime(timestamp interface{}) time.Time {
var ts int64
switch v := timestamp.(type) {
case int64:
ts = v
case uint:
ts = int64(v)
case int:
ts = int64(v)
case uint32:
ts = int64(v)
case int32:
ts = int64(v)
case uint64:
ts = int64(v)
default:
i, e := strconv.ParseInt(fmt.Sprint(timestamp), 10, 64)
if e != nil {
log.Println(e)
}
ts = i
}
return time.Unix(ts, 0)
}
// NumberFormat renders number with `precision` decimal places and
// inserts a thousands delimiter (default ",") into the integer part.
// Pass an explicit empty delimiter to disable grouping.
//
// Fixes: with precision == 0 the old code assumed a decimal point was
// present and produced e.g. "1234" ungrouped (treating the last digit
// as the fraction); a leading minus sign was also grouped as if it were
// a digit, yielding strings like "-,123.45".
func NumberFormat(number interface{}, precision int, delim ...string) string {
	r := fmt.Sprintf(`%.*f`, precision, ToFloat64(number))
	d := `,`
	if len(delim) > 0 {
		d = delim[0]
		if len(d) == 0 {
			return r
		}
	}
	// End of the integer digits: the whole string, or everything before
	// the decimal point when a fraction was printed.
	intEnd := len(r)
	if precision > 0 {
		intEnd = len(r) - precision - 1
	}
	// Keep a leading minus sign out of the grouping.
	signLen := 0
	if strings.HasPrefix(r, `-`) {
		signLen = 1
	}
	digits := r[signLen:intEnd]
	if len(digits) <= 3 {
		return r
	}
	groups := make([]string, 0, (len(digits)+2)/3)
	head := len(digits) % 3
	if head > 0 {
		groups = append(groups, digits[:head])
	}
	for i := head; i < len(digits); i += 3 {
		groups = append(groups, digits[i:i+3])
	}
	return r[:signLen] + strings.Join(groups, d) + r[intEnd:]
}
|
package sinmetalcraft
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/log"
"google.golang.org/appengine/urlfetch"
"google.golang.org/appengine/user"
"google.golang.org/api/compute/v1"
"golang.org/x/net/context"
)
const PROJECT_NAME = "sinmetalcraft"
const INSTANCE_NAME = "minecraft"
// init wires up this module's HTTP routes: the Cloud Pub/Sub log push
// endpoint (/minecraft) and the Minecraft world CRUD API.
func init() {
	api := MinecraftApi{}
	http.HandleFunc("/minecraft", handlerMinecraftLog)
	http.HandleFunc("/api/1/minecraft", api.Handler)
}
// Minecraft is the Datastore entity describing one Minecraft world and
// the GCE server instance that hosts it. Entities are keyed by the World
// name (see MinecraftApi.Post).
// NOTE(review): the JSON tags "latestSnpshot" and "operationstatus" look
// like typos, but renaming them would break existing API clients —
// confirm before changing.
type Minecraft struct {
	Key                *datastore.Key `json:"-" datastore:"-"`   // entity key; populated by handlers, never stored
	KeyStr             string         `json:"key" datastore:"-"` // URL-safe encoding of Key, filled in by handlers
	World              string         `json:"world"`
	ResourceID         int64          `json:"resourceID"`
	Zone               string         `json:"zone" datastore:",unindexed"`
	IPAddr             string         `json:"ipAddr" datastore:",unindexed"`
	Status             string         `json:"status" datastore:",unindexed"`
	OperationType      string         `json:"operationType" datastore:",unindexed"`
	OperationStatus    string         `json:"operationstatus" datastore:",unindexed"`
	LatestSnapshot     string         `json:"latestSnpshot" datastore:",unindexed"`
	JarVersion         string         `json:"jarVersion" datastore:",unindexed"`
	OverviewerSnapshot string         `json:"overViewerSnapshot" datastore:",unindexed"` // name of the snapshot already rendered by Minecraft Overviewer
	CreatedAt          time.Time      `json:"createdAt"`
	UpdatedAt          time.Time      `json:"updatedAt"`
}
// MinecraftApiListResponse is a paged list of instance summaries;
// Cursor is an opaque continuation token.
type MinecraftApiListResponse struct {
	Items  []MinecraftApiResponse `json:"items"`
	Cursor string                 `json:"cursor"`
}
// MinecraftApiResponse summarises one GCE Minecraft server instance.
type MinecraftApiResponse struct {
	InstanceName      string `json:"instanceName"`
	Zone              string `json:"zone"`
	IPAddr            string `json:"iPAddr"`
	Status            string `json:"status"`
	CreationTimestamp string `json:"creationTimestamp"`
}
// Metadata is the envelope metadata of a Cloud Logging entry delivered
// through Pub/Sub (project, service, zone, labels, timestamp).
type Metadata struct {
	ProjectID   string            `json:"projectId"`
	ServiceName string            `json:"serviceName"`
	Zone        string            `json:"zone"`
	Labels      map[string]string `json:"labels"`
	Timestamp   string            `json:"timestamp"`
}
// StructPayload carries the structured payload of a log entry; Log is
// the raw Minecraft server log line forwarded to Slack.
type StructPayload struct {
	Log string `json:"log"`
}
// PubSubData is the decoded, base64-unwrapped body of a Pub/Sub message
// pushed by Cloud Logging (see PubSubData.Decode).
type PubSubData struct {
	Metadata      Metadata      `json:"metadata"`
	InsertID      string        `json:"insertId"`
	Log           string        `json:"log"`
	StructPayload StructPayload `json:"structPayload"`
}
// Message is a single Pub/Sub push message; Data holds base64-encoded
// JSON (decoded by PubSubData.Decode).
type Message struct {
	Data       string            `json:"data"`
	Attributes map[string]string `json:"attributes"`
	MessageID  string            `json:"message_id"`
}
// PubSubBody is the outer envelope of a Pub/Sub push request.
type PubSubBody struct {
	Message      Message `json:"message"`
	Subscription string  `json:"subscription"`
}
// MinecraftApi groups the HTTP handlers mounted at /api/1/minecraft.
type MinecraftApi struct{}
// Handler dispatches /api/1/minecraft requests to the verb-specific
// handlers; unsupported verbs receive 405 Method Not Allowed.
func (a *MinecraftApi) Handler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "POST":
		a.Post(w, r)
	case "PUT":
		a.Put(w, r)
	case "GET":
		a.List(w, r)
	case "DELETE":
		a.Delete(w, r)
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// Post creates a new Minecraft world entity from the JSON request body.
// Anonymous callers receive 401 with a login URL, non-admins 403. The
// entity is keyed by the world name and initialised to status
// "not_exists"; the encoded key is echoed back in the 201 response.
func (a *MinecraftApi) Post(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// Resolve the login URL before writing any status code: the
		// previous revision wrote 401 first, which turned the later 500
		// into a superfluous, ignored WriteHeader call.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key := datastore.NewKey(ctx, "Minecraft", minecraft.World, 0, nil)
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// Use the transaction context c for all datastore operations —
		// the previous revision used the outer ctx, so the Get/Put ran
		// outside the transaction.
		var entity Minecraft
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		minecraft.Status = "not_exists"
		now := time.Now()
		minecraft.CreatedAt = now
		minecraft.UpdatedAt = now
		_, err = datastore.Put(c, key, &minecraft)
		return err
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	minecraft.KeyStr = key.Encode()
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(minecraft)
}
// Put updates the mutable fields (IPAddr, Zone, JarVersion) of an
// existing Minecraft entity, identified by the encoded Datastore key in
// the request body. Requires a signed-in admin user.
func (a *MinecraftApi) Put(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// Resolve the login URL before writing any status code; writing
		// 401 first made the later 500 a superfluous-WriteHeader no-op.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key, err := datastore.DecodeKey(minecraft.KeyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	minecraft.Key = key
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// Use the transaction context c for all datastore operations —
		// the previous revision used the outer ctx, bypassing the
		// transaction.
		var entity Minecraft
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		// NOTE(review): ErrNoSuchEntity is deliberately swallowed, so a
		// PUT for a missing entity stores a mostly-zero record — confirm
		// this is intended rather than returning 404.
		entity.IPAddr = minecraft.IPAddr
		entity.Zone = minecraft.Zone
		entity.JarVersion = minecraft.JarVersion
		entity.UpdatedAt = time.Now()
		_, err = datastore.Put(c, key, &entity)
		return err
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(minecraft)
}
// Delete removes the Minecraft entity whose encoded Datastore key is
// supplied in the "key" form value. Requires a signed-in admin user.
func (a *MinecraftApi) Delete(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// Resolve the login URL before writing any status code; writing
		// 401 first made the later 500 a superfluous-WriteHeader no-op.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	keyStr := r.FormValue("key")
	key, err := datastore.DecodeKey(keyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// Use the transaction context c — the previous revision deleted
		// via the outer ctx, bypassing the transaction.
		return datastore.Delete(c, key)
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Delete Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	// Write a literal empty JSON object. The previous revision used
	// Encode(`{}`), which JSON-encodes the *string* and emitted "\"{}\"".
	w.Write([]byte(`{}`))
}
// List returns every Minecraft entity as JSON, most recently updated
// first. Each entity is decorated with its key and encoded key string.
func (a *MinecraftApi) List(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	query := datastore.NewQuery("Minecraft").Order("-UpdatedAt")
	results := make([]*Minecraft, 0)
	it := query.Run(ctx)
	for {
		var m Minecraft
		key, err := it.Next(&m)
		if err == datastore.Done {
			break
		}
		if err != nil {
			log.Errorf(ctx, "Minecraft Query Error. error = %s", err.Error())
			w.Header().Set("Content-Type", "application/json; charset=utf-8")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		m.Key = key
		m.KeyStr = key.Encode()
		results = append(results, &m)
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(results)
}
// handlerMinecraftLog receives Cloud Pub/Sub push messages carrying
// Minecraft server log lines and forwards each non-empty log entry to
// Slack. The push envelope is JSON; Message.Data is base64-encoded JSON
// (see PubSubBody.Decode / PubSubData.Decode). Returning 200 acks the
// message; 4xx/5xx causes Pub/Sub to retry.
func handlerMinecraftLog(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	for k, v := range r.Header {
		log.Infof(ctx, "%s:%s", k, v)
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body read: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	log.Infof(ctx, "request body = %s", string(body))
	var psb PubSubBody
	err = psb.Decode(body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Body decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	log.Infof(ctx, "request Pub Sub Body = %v", psb)
	var psd PubSubData
	err = psd.Decode(psb.Message.Data)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Data decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	b, err := json.Marshal(psd)
	if err != nil {
		// Logging-only failure; the decoded psd is still usable below.
		log.Errorf(ctx, "PubSubData json marshal error %v", err)
	} else {
		log.Infof(ctx, "request Pub Sub Data = %s", b)
	}
	if len(psd.StructPayload.Log) < 1 {
		// Nothing to forward; ack the message so Pub/Sub stops retrying.
		log.Infof(ctx, "StructPayload.Log is empty.")
		w.WriteHeader(http.StatusOK)
		return
	}
	var sm SlackMessage
	fields := make([]SlackField, 0)
	sa := SlackAttachment{
		Color:      "#36a64f",
		AuthorName: "sinmetalcraft",
		AuthorIcon: "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg",
		Title:      psd.StructPayload.Log,
		Fields:     fields,
	}
	sm.UserName = "sinmetalcraft"
	sm.IconUrl = "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg"
	sm.Text = ""
	sm.Attachments = []SlackAttachment{sa}
	acs := AppConfigService{}
	config, err := acs.Get(ctx)
	if err != nil {
		log.Errorf(ctx, "ERROR App Config Get: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	_, err = PostToSlack(ctx, config.SlackPostUrl, sm)
	if err != nil {
		log.Errorf(ctx, "ERROR Post Slack: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
}
// listInstance fetches one page of GCE instances in the given zone and
// returns the items plus the next-page token. The ctx parameter is
// currently unused by the underlying call.
func listInstance(ctx context.Context, is *compute.InstancesService, zone string) ([]*compute.Instance, string, error) {
	res, err := is.List(PROJECT_NAME, zone).Do()
	if err != nil {
		return nil, "", err
	}
	return res.Items, res.NextPageToken, nil
}
// createDiskFromSnapshot provisions a 100 GB pd-ssd disk named
// "<INSTANCE_NAME>-world-<world>" in the world's zone, restored from the
// world's latest snapshot, and returns the insert operation.
func createDiskFromSnapshot(ctx context.Context, ds *compute.DisksService, minecraft Minecraft) (*compute.Operation, error) {
	diskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
	disk := &compute.Disk{
		Name:           diskName,
		SizeGb:         100,
		SourceSnapshot: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/snapshots/" + minecraft.LatestSnapshot,
		Type:           "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
	}
	ope, err := ds.Insert(PROJECT_NAME, minecraft.Zone, disk).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR insert disk: %s", err)
		return nil, err
	}
	WriteLog(ctx, "INSTNCE_DISK_OPE", ope)
	return ope, nil
}
// createInstance boots a new preemptible n1-highmem-2 GCE server for the
// given world, named "<INSTANCE_NAME>-<world>". It attaches a boot disk
// built from the "minecraft" image family plus the pre-created world
// data disk (see createDiskFromSnapshot), injects startup/shutdown
// scripts and world metadata, then enqueues a task-queue poll for the
// insert operation via CallMinecraftTQ. Returns the instance name.
func createInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	worldDiskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
	log.Infof(ctx, "create instance name = %s", name)
	startupScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-startup-script.sh"
	shutdownScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-shutdown-script.sh"
	stateValue := "new"
	newIns := &compute.Instance{
		Name: name,
		Zone: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone,
		MachineType: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/machineTypes/n1-highmem-2",
		Disks: []*compute.AttachedDisk{
			// Boot disk, created fresh from the image family.
			&compute.AttachedDisk{
				AutoDelete: true,
				Boot: true,
				DeviceName: name,
				Mode: "READ_WRITE",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/images/family/minecraft",
					DiskType: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
					DiskSizeGb: 100,
				},
			},
			// World data disk; must already exist in the same zone
			// (see createDiskFromSnapshot). AutoDelete means it is
			// removed together with the instance.
			&compute.AttachedDisk{
				AutoDelete: true,
				Boot: false,
				DeviceName: worldDiskName,
				Mode: "READ_WRITE",
				Source: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/disks/" + worldDiskName,
			},
		},
		CanIpForward: false,
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				Network: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/networks/default",
				AccessConfigs: []*compute.AccessConfig{
					// Ephemeral external IP so players can connect.
					&compute.AccessConfig{
						Name: "External NAT",
						Type: "ONE_TO_ONE_NAT",
					},
				},
			},
		},
		Tags: &compute.Tags{
			Items: []string{
				"minecraft-server",
			},
		},
		// Metadata consumed by the startup/shutdown scripts on the VM.
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				&compute.MetadataItems{
					Key: "startup-script-url",
					Value: &startupScriptURL,
				},
				&compute.MetadataItems{
					Key: "shutdown-script-url",
					Value: &shutdownScriptURL,
				},
				&compute.MetadataItems{
					Key: "world",
					Value: &minecraft.World,
				},
				&compute.MetadataItems{
					Key: "state",
					Value: &stateValue,
				},
				&compute.MetadataItems{
					Key: "minecraft-version",
					Value: &minecraft.JarVersion,
				},
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			&compute.ServiceAccount{
				Email: "default",
				Scopes: []string{
					"https://www.googleapis.com/auth/cloud-platform",
				},
			},
		},
		// Preemptible VMs require AutomaticRestart=false and
		// OnHostMaintenance=TERMINATE; the VM may be stopped at any time.
		Scheduling: &compute.Scheduling{
			AutomaticRestart: false,
			OnHostMaintenance: "TERMINATE",
			Preemptible: true,
		},
	}
	ope, err := is.Insert(PROJECT_NAME, minecraft.Zone, newIns).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR insert instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_CREATE_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// startInstance starts the stopped GCE instance for the given world and
// enqueues a task-queue poll for the resulting operation via
// CallMinecraftTQ. Returns the instance name.
func startInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "start instance name = %s", name)
	ope, err := is.Start(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		// Previous revision logged "reset instance" here (copy-paste).
		log.Errorf(ctx, "ERROR start instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_START_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// resetInstance hard-resets (power-cycles) the running GCE instance for
// the given world and enqueues a task-queue poll for the resulting
// operation via CallMinecraftTQ. Returns the instance name.
func resetInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "reset instance name = %s", name)
	ope, err := is.Reset(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR reset instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_RESET_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// deleteInstance deletes the GCE instance for the given world and
// enqueues a task-queue poll for the delete operation. The attached
// disks are removed with it (they were created with AutoDelete: true in
// createInstance). Returns the instance name.
func deleteInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "delete instance name = %s", name)
	ope, err := is.Delete(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		log.Errorf(ctx, "ERROR delete instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_DELETE_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// Decode unmarshals the raw JSON push-request body into psb.
func (psb *PubSubBody) Decode(body []byte) error {
	return json.Unmarshal(body, psb)
}
// Decode base64-decodes body (the Data field of a Pub/Sub Message) and
// unmarshals the resulting JSON into psd.
func (psd *PubSubData) Decode(body string) error {
	mr := base64.NewDecoder(base64.StdEncoding, strings.NewReader(body))
	return json.NewDecoder(mr).Decode(psd)
}
// SlackMessage is the JSON payload posted to a Slack incoming webhook.
type SlackMessage struct {
	UserName    string            `json:"username"`
	IconUrl     string            `json:"icon_url"`
	Text        string            `json:"text"`
	Attachments []SlackAttachment `json:"attachments"`
}
// SlackAttachment is one attachment block of a Slack webhook message.
type SlackAttachment struct {
	Color      string       `json:"color"`
	AuthorName string       `json:"author_name"`
	AuthorLink string       `json:"author_link"`
	AuthorIcon string       `json:"author_icon"`
	Title      string       `json:"title"`
	TitleLink  string       `json:"title_link"`
	Fields     []SlackField `json:"fields"`
}
// SlackField is a single field inside a Slack attachment.
// NOTE(review): Slack field objects also support "value" and "short";
// only Title is modelled here — confirm nothing else is needed.
type SlackField struct {
	Title string `json:"title"`
}
// PostToSlack POSTs message as JSON to the given Slack webhook URL using
// the App Engine urlfetch client. On success the caller owns resp and
// must close resp.Body (NOTE(review): current callers discard it —
// confirm connection reuse is not a concern here).
func PostToSlack(ctx context.Context, url string, message SlackMessage) (resp *http.Response, err error) {
	client := urlfetch.Client(ctx)
	body, err := json.Marshal(message)
	if err != nil {
		return nil, err
	}
	// Log via the request-scoped App Engine logger; the previous
	// revision's fmt.Println output is not captured in App Engine logs.
	log.Infof(ctx, "post to slack: %s", body)
	return client.Post(
		url,
		"application/json",
		bytes.NewReader(body))
}
// WriteLog emits v as a one-line JSON object {"<key>": <v>} to the
// request log. On marshal failure it logs the error and still writes
// the line with an empty body — NOTE(review): confirm downstream log
// parsers tolerate the malformed {"<key>":} line that produces.
func WriteLog(ctx context.Context, key string, v interface{}) {
	body, err := json.Marshal(v)
	if err != nil {
		log.Errorf(ctx, "WriteLog Error %s %v", err.Error(), v)
	}
	log.Infof(ctx, `{"%s":%s}`, key, body)
}
gofmt
package sinmetalcraft
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/log"
"google.golang.org/appengine/urlfetch"
"google.golang.org/appengine/user"
"google.golang.org/api/compute/v1"
"golang.org/x/net/context"
)
const PROJECT_NAME = "sinmetalcraft"
const INSTANCE_NAME = "minecraft"
func init() {
api := MinecraftApi{}
http.HandleFunc("/minecraft", handlerMinecraftLog)
http.HandleFunc("/api/1/minecraft", api.Handler)
}
type Minecraft struct {
Key *datastore.Key `json:"-" datastore:"-"`
KeyStr string `json:"key" datastore:"-"`
World string `json:"world"`
ResourceID int64 `json:"resourceID"`
Zone string `json:"zone" datastore:",unindexed"`
IPAddr string `json:"ipAddr" datastore:",unindexed"`
Status string `json:"status" datastore:",unindexed"`
OperationType string `json:"operationType" datastore:",unindexed"`
OperationStatus string `json:"operationstatus" datastore:",unindexed"`
LatestSnapshot string `json:"latestSnpshot" datastore:",unindexed"`
JarVersion string `json:"jarVersion" datastore:",unindexed"`
OverviewerSnapshot string `json:"overViewerSnapshot" datastore:",unindexed"` // Minecraft Overviewerを作成済みのsnapshot name
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
}
type MinecraftApiListResponse struct {
Items []MinecraftApiResponse `json:"items"`
Cursor string `json:"cursor"`
}
type MinecraftApiResponse struct {
InstanceName string `json:"instanceName"`
Zone string `json:"zone"`
IPAddr string `json:"iPAddr"`
Status string `json:"status"`
CreationTimestamp string `json:"creationTimestamp"`
}
type Metadata struct {
ProjectID string `json:"projectId"`
ServiceName string `json:"serviceName"`
Zone string `json:"zone"`
Labels map[string]string `json:"labels"`
Timestamp string `json:"timestamp"`
}
type StructPayload struct {
Log string `json:"log"`
}
type PubSubData struct {
Metadata Metadata `json:"metadata"`
InsertID string `json:"insertId"`
Log string `json:"log"`
StructPayload StructPayload `json:"structPayload"`
}
type Message struct {
Data string `json:"data"`
Attributes map[string]string `json:"attributes"`
MessageID string `json:"message_id"`
}
type PubSubBody struct {
Message Message `json:"message"`
Subscription string `json:"subscription"`
}
type MinecraftApi struct{}
// /api/1/minecraft handler
func (a *MinecraftApi) Handler(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
a.Post(w, r)
} else if r.Method == "PUT" {
a.Put(w, r)
} else if r.Method == "GET" {
a.List(w, r)
} else if r.Method == "DELETE" {
a.Delete(w, r)
} else {
w.WriteHeader(http.StatusMethodNotAllowed)
}
}
// Post creates a new Minecraft world entity from the JSON request body.
// Anonymous callers receive 401 with a login URL, non-admins 403. The
// entity is keyed by the world name and initialised to status
// "not_exists"; the encoded key is echoed back in the 201 response.
func (a *MinecraftApi) Post(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// Resolve the login URL before writing any status code: the
		// previous revision wrote 401 first, which turned the later 500
		// into a superfluous, ignored WriteHeader call.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key := datastore.NewKey(ctx, "Minecraft", minecraft.World, 0, nil)
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// Use the transaction context c for all datastore operations —
		// the previous revision used the outer ctx, so the Get/Put ran
		// outside the transaction.
		var entity Minecraft
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		minecraft.Status = "not_exists"
		now := time.Now()
		minecraft.CreatedAt = now
		minecraft.UpdatedAt = now
		_, err = datastore.Put(c, key, &minecraft)
		return err
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	minecraft.KeyStr = key.Encode()
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(minecraft)
}
// Put updates the mutable fields (IPAddr, Zone, JarVersion) of an
// existing Minecraft entity, identified by the encoded Datastore key in
// the request body. Requires a signed-in admin user.
func (a *MinecraftApi) Put(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// Resolve the login URL before writing any status code; writing
		// 401 first made the later 500 a superfluous-WriteHeader no-op.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	var minecraft Minecraft
	err := json.NewDecoder(r.Body).Decode(&minecraft)
	if err != nil {
		log.Infof(ctx, "request body, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid request."}`))
		return
	}
	defer r.Body.Close()
	key, err := datastore.DecodeKey(minecraft.KeyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	minecraft.Key = key
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// Use the transaction context c for all datastore operations —
		// the previous revision used the outer ctx, bypassing the
		// transaction.
		var entity Minecraft
		err := datastore.Get(c, key, &entity)
		if err != datastore.ErrNoSuchEntity && err != nil {
			return err
		}
		// NOTE(review): ErrNoSuchEntity is deliberately swallowed, so a
		// PUT for a missing entity stores a mostly-zero record — confirm
		// this is intended rather than returning 404.
		entity.IPAddr = minecraft.IPAddr
		entity.Zone = minecraft.Zone
		entity.JarVersion = minecraft.JarVersion
		entity.UpdatedAt = time.Now()
		_, err = datastore.Put(c, key, &entity)
		return err
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Put Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(minecraft)
}
// Delete removes the Minecraft entity whose encoded Datastore key is
// supplied in the "key" form value. Requires a signed-in admin user.
func (a *MinecraftApi) Delete(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u := user.Current(ctx)
	if u == nil {
		// Resolve the login URL before writing any status code; writing
		// 401 first made the later 500 a superfluous-WriteHeader no-op.
		loginURL, err := user.LoginURL(ctx, "")
		if err != nil {
			log.Errorf(ctx, "get user login URL error, %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		w.Write([]byte(fmt.Sprintf(`{"loginURL":"%s"}`, loginURL)))
		return
	}
	if !user.IsAdmin(ctx) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	keyStr := r.FormValue("key")
	key, err := datastore.DecodeKey(keyStr)
	if err != nil {
		log.Infof(ctx, "invalid key, %v", r.Body)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"message": "invalid key."}`))
		return
	}
	err = datastore.RunInTransaction(ctx, func(c context.Context) error {
		// Use the transaction context c — the previous revision deleted
		// via the outer ctx, bypassing the transaction.
		return datastore.Delete(c, key)
	}, nil)
	if err != nil {
		log.Errorf(ctx, "Minecraft Delete Error. error = %s", err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	// Write a literal empty JSON object. The previous revision used
	// Encode(`{}`), which JSON-encodes the *string* and emitted "\"{}\"".
	w.Write([]byte(`{}`))
}
// list world data
func (a *MinecraftApi) List(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
q := datastore.NewQuery("Minecraft").Order("-UpdatedAt")
list := make([]*Minecraft, 0)
for t := q.Run(ctx); ; {
var entity Minecraft
key, err := t.Next(&entity)
if err == datastore.Done {
break
}
if err != nil {
log.Errorf(ctx, "Minecraft Query Error. error = %s", err.Error())
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusInternalServerError)
return
}
entity.Key = key
entity.KeyStr = key.Encode()
list = append(list, &entity)
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(list)
}
// handlerMinecraftLog receives Cloud Pub/Sub push messages carrying
// Minecraft server log lines and forwards each non-empty log entry to
// Slack. The push envelope is JSON; Message.Data is base64-encoded JSON
// (see PubSubBody.Decode / PubSubData.Decode). Returning 200 acks the
// message; 4xx/5xx causes Pub/Sub to retry.
func handlerMinecraftLog(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	for k, v := range r.Header {
		log.Infof(ctx, "%s:%s", k, v)
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body read: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	log.Infof(ctx, "request body = %s", string(body))
	var psb PubSubBody
	err = psb.Decode(body)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Body decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	log.Infof(ctx, "request Pub Sub Body = %v", psb)
	var psd PubSubData
	err = psd.Decode(psb.Message.Data)
	if err != nil {
		log.Errorf(ctx, "ERROR request body Pub Sub Data decode: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	b, err := json.Marshal(psd)
	if err != nil {
		// Logging-only failure; the decoded psd is still usable below.
		log.Errorf(ctx, "PubSubData json marshal error %v", err)
	} else {
		log.Infof(ctx, "request Pub Sub Data = %s", b)
	}
	if len(psd.StructPayload.Log) < 1 {
		// Nothing to forward; ack the message so Pub/Sub stops retrying.
		log.Infof(ctx, "StructPayload.Log is empty.")
		w.WriteHeader(http.StatusOK)
		return
	}
	var sm SlackMessage
	fields := make([]SlackField, 0)
	sa := SlackAttachment{
		Color:      "#36a64f",
		AuthorName: "sinmetalcraft",
		AuthorIcon: "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg",
		Title:      psd.StructPayload.Log,
		Fields:     fields,
	}
	sm.UserName = "sinmetalcraft"
	sm.IconUrl = "https://storage.googleapis.com/sinmetalcraft-image/minecraft.jpeg"
	sm.Text = ""
	sm.Attachments = []SlackAttachment{sa}
	acs := AppConfigService{}
	config, err := acs.Get(ctx)
	if err != nil {
		log.Errorf(ctx, "ERROR App Config Get: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	_, err = PostToSlack(ctx, config.SlackPostUrl, sm)
	if err != nil {
		log.Errorf(ctx, "ERROR Post Slack: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
}
// list gce instance
func listInstance(ctx context.Context, is *compute.InstancesService, zone string) ([]*compute.Instance, string, error) {
ilc := is.List(PROJECT_NAME, zone)
il, err := ilc.Do()
if err != nil {
return nil, "", err
}
return il.Items, il.NextPageToken, nil
}
// create disk from snapshot
func createDiskFromSnapshot(ctx context.Context, ds *compute.DisksService, minecraft Minecraft) (*compute.Operation, error) {
name := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
d := &compute.Disk{
Name: name,
SizeGb: 100,
SourceSnapshot: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/snapshots/" + minecraft.LatestSnapshot,
Type: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
}
ope, err := ds.Insert(PROJECT_NAME, minecraft.Zone, d).Do()
if err != nil {
log.Errorf(ctx, "ERROR insert disk: %s", err)
return nil, err
}
WriteLog(ctx, "INSTNCE_DISK_OPE", ope)
return ope, err
}
// create gce instance
func createInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
name := INSTANCE_NAME + "-" + minecraft.World
worldDiskName := fmt.Sprintf("%s-world-%s", INSTANCE_NAME, minecraft.World)
log.Infof(ctx, "create instance name = %s", name)
startupScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-startup-script.sh"
shutdownScriptURL := "gs://sinmetalcraft-minecraft-shell/minecraftserver-shutdown-script.sh"
stateValue := "new"
newIns := &compute.Instance{
Name: name,
Zone: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone,
MachineType: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/machineTypes/n1-highmem-2",
Disks: []*compute.AttachedDisk{
&compute.AttachedDisk{
AutoDelete: true,
Boot: true,
DeviceName: name,
Mode: "READ_WRITE",
InitializeParams: &compute.AttachedDiskInitializeParams{
SourceImage: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/images/family/minecraft",
DiskType: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/diskTypes/pd-ssd",
DiskSizeGb: 100,
},
},
&compute.AttachedDisk{
AutoDelete: true,
Boot: false,
DeviceName: worldDiskName,
Mode: "READ_WRITE",
Source: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/zones/" + minecraft.Zone + "/disks/" + worldDiskName,
},
},
CanIpForward: false,
NetworkInterfaces: []*compute.NetworkInterface{
&compute.NetworkInterface{
Network: "https://www.googleapis.com/compute/v1/projects/" + PROJECT_NAME + "/global/networks/default",
AccessConfigs: []*compute.AccessConfig{
&compute.AccessConfig{
Name: "External NAT",
Type: "ONE_TO_ONE_NAT",
},
},
},
},
Tags: &compute.Tags{
Items: []string{
"minecraft-server",
},
},
Metadata: &compute.Metadata{
Items: []*compute.MetadataItems{
&compute.MetadataItems{
Key: "startup-script-url",
Value: &startupScriptURL,
},
&compute.MetadataItems{
Key: "shutdown-script-url",
Value: &shutdownScriptURL,
},
&compute.MetadataItems{
Key: "world",
Value: &minecraft.World,
},
&compute.MetadataItems{
Key: "state",
Value: &stateValue,
},
&compute.MetadataItems{
Key: "minecraft-version",
Value: &minecraft.JarVersion,
},
},
},
ServiceAccounts: []*compute.ServiceAccount{
&compute.ServiceAccount{
Email: "default",
Scopes: []string{
"https://www.googleapis.com/auth/cloud-platform",
},
},
},
Scheduling: &compute.Scheduling{
AutomaticRestart: false,
OnHostMaintenance: "TERMINATE",
Preemptible: true,
},
}
ope, err := is.Insert(PROJECT_NAME, minecraft.Zone, newIns).Do()
if err != nil {
log.Errorf(ctx, "ERROR insert instance: %s", err)
return "", err
}
WriteLog(ctx, "INSTNCE_CREATE_OPE", ope)
_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
if err != nil {
return name, err
}
return name, nil
}
// startInstance starts the stopped GCE instance for the given world and
// enqueues a task-queue poll for the resulting operation via
// CallMinecraftTQ. Returns the instance name.
func startInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
	name := INSTANCE_NAME + "-" + minecraft.World
	log.Infof(ctx, "start instance name = %s", name)
	ope, err := is.Start(PROJECT_NAME, minecraft.Zone, name).Do()
	if err != nil {
		// Previous revision logged "reset instance" here (copy-paste).
		log.Errorf(ctx, "ERROR start instance: %s", err)
		return "", err
	}
	WriteLog(ctx, "INSTNCE_START_OPE", ope)
	_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
	if err != nil {
		return name, err
	}
	return name, nil
}
// reset instance
func resetInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
name := INSTANCE_NAME + "-" + minecraft.World
log.Infof(ctx, "reset instance name = %s", name)
ope, err := is.Reset(PROJECT_NAME, minecraft.Zone, name).Do()
if err != nil {
log.Errorf(ctx, "ERROR reset instance: %s", err)
return "", err
}
WriteLog(ctx, "INSTNCE_RESET_OPE", ope)
_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
if err != nil {
return name, err
}
return name, nil
}
// deleteInstance deletes the Compute Engine instance backing the given
// Minecraft world, then enqueues a task-queue task that watches the
// returned operation. It returns the instance name.
func deleteInstance(ctx context.Context, is *compute.InstancesService, minecraft Minecraft) (string, error) {
name := INSTANCE_NAME + "-" + minecraft.World
log.Infof(ctx, "delete instance name = %s", name)
ope, err := is.Delete(PROJECT_NAME, minecraft.Zone, name).Do()
if err != nil {
log.Errorf(ctx, "ERROR delete instance: %s", err)
return "", err
}
WriteLog(ctx, "INSTNCE_DELETE_OPE", ope)
_, err = CallMinecraftTQ(ctx, minecraft.Key, ope.Name)
if err != nil {
return name, err
}
return name, nil
}
// Decode unmarshals a raw JSON Pub/Sub message body into psb.
func (psb *PubSubBody) Decode(body []byte) error {
	return json.Unmarshal(body, psb)
}
// Decode base64-decodes the Pub/Sub data payload and unmarshals the
// resulting JSON into psd, streaming through the decoder without an
// intermediate buffer.
func (psd *PubSubData) Decode(body string) error {
mr := base64.NewDecoder(base64.StdEncoding, strings.NewReader(body))
return json.NewDecoder(mr).Decode(psd)
}
// SlackMessage is the JSON payload posted to a Slack incoming webhook.
type SlackMessage struct {
UserName string `json:"username"`
IconUrl string `json:"icon_url"`
Text string `json:"text"`
Attachments []SlackAttachment `json:"attachments"`
}
// SlackAttachment is a rich-formatting attachment inside a SlackMessage,
// following Slack's legacy attachment schema.
type SlackAttachment struct {
Color string `json:"color"`
AuthorName string `json:"author_name"`
AuthorLink string `json:"author_link"`
AuthorIcon string `json:"author_icon"`
Title string `json:"title"`
TitleLink string `json:"title_link"`
Fields []SlackField `json:"fields"`
}
// SlackField is a single field inside a Slack attachment.
// NOTE(review): Slack fields normally also carry "value" and "short"
// members; only the title is serialized here — confirm this is intended.
type SlackField struct {
Title string `json:"title"`
}
// PostToSlack posts message as JSON to the given Slack incoming-webhook
// URL using an App Engine urlfetch client bound to ctx. The caller owns
// the returned response and must close its body.
func PostToSlack(ctx context.Context, url string, message SlackMessage) (resp *http.Response, err error) {
	client := urlfetch.Client(ctx)
	body, err := json.Marshal(message)
	if err != nil {
		return nil, err
	}
	// A leftover fmt.Println debug print of the payload was removed here;
	// it wrote to stdout on every call.
	return client.Post(
		url,
		"application/json",
		bytes.NewReader(body))
}
// WriteLog emits v as a single JSON log line of the form {"key":v} to
// the App Engine info log. If v cannot be marshalled the failure is
// logged and nothing else is written — previously an invalid line of
// the form {"key":} was still emitted.
func WriteLog(ctx context.Context, key string, v interface{}) {
	body, err := json.Marshal(v)
	if err != nil {
		log.Errorf(ctx, "WriteLog Error %s %v", err.Error(), v)
		return
	}
	log.Infof(ctx, `{"%s":%s}`, key, body)
}
|
package dragonfly
import (
//"fmt"
"io/ioutil"
"os"
"os/exec"
)
// Step is one stage of an image-processing pipeline. Apply consumes the
// file and error channels of the previous stage and returns the channels
// feeding the next stage.
type Step interface {
Apply(in chan *os.File, errIn chan error) (out chan *os.File, errChan chan error)
//Args []string
//Command string
}
// Job is an ordered list of pipeline Steps together with the temp file
// the pipeline eventually produces.
type Job struct {
Steps []Step
Temp *os.File
}
// Apply chains each step's channels onto the previous one's, then waits
// for either a result file or an error from the final stage.
//
// NOTE(review): tail starts out closed, so the first step immediately
// receives a nil *os.File; steps whose callback ignores its input
// (e.g. FetchFileStep) depend on this — confirm before reordering.
// NOTE(review): defer evaluates its argument at the defer site, so the
// deferred close targets the ORIGINAL errChan, not the last stage's.
func (job *Job) Apply() (temp *os.File, err error) {
tail := make(chan *os.File)
errChan := make(chan error)
close(tail)
defer close(errChan)
for _, step := range job.Steps {
tail, errChan = step.Apply(tail, errChan)
}
select {
case err = <-errChan:
case temp = <-tail:
}
return
}
// stepApplication transforms the previous stage's temp file into a new one.
type stepApplication func(temp *os.File) (*os.File, error)
// applyStepPipeline wires a stepApplication into the channel pipeline:
// a goroutine waits for either a file or an error from the previous
// stage, runs step on the file, and forwards the result (or the error)
// downstream. Both output channels are closed when the goroutine exits.
func applyStepPipeline(in chan *os.File, errIn chan error, step stepApplication) (out chan *os.File, errChan chan error) {
out = make(chan *os.File)
errChan = make(chan error)
go func() {
defer close(out)
defer close(errChan)
var (
err error
content *os.File
)
// Whichever arrives first wins: an input file or an upstream error.
select {
case prev := <-in:
content, err = step(prev)
case err = <-errIn:
}
if err != nil {
errChan <- err
return
}
out <- content
}()
return out, errChan
}
// FetchFileStep loads a source file (named by Args[0]) into the pipeline.
type FetchFileStep struct {
Args []string
Command string
}
// ResizeStep resizes an image via ImageMagick's convert; Args[1] holds
// the geometry string passed to the -resize flag.
type ResizeStep struct {
Args []string
Command string
}
// Apply wires the resize operation into the pipeline.
// NOTE(review): assumes len(step.Args) >= 2; a shorter slice panics here.
func (step ResizeStep) Apply(in chan *os.File, errIn chan error) (out chan *os.File, errChan chan error) {
return applyStepPipeline(in, errIn, func(temp *os.File) (newTemp *os.File, err error) {
format := step.Args[1]
return step.resize(temp, format)
})
}
// Apply wires file fetching into the pipeline. The previous stage's file
// is deliberately ignored; Args[0] names the file to load.
// NOTE(review): assumes len(step.Args) >= 1; an empty slice panics here.
func (step FetchFileStep) Apply(in chan *os.File, errIn chan error) (out chan *os.File, errChan chan error) {
return applyStepPipeline(in, errIn, func(_ *os.File) (temp *os.File, err error) {
filename := step.Args[0]
return fechFile(filename)
//return nil, errors.New("please don't stop the music")
})
}
// resize shells out to ImageMagick's convert to resize image to the
// given geometry (e.g. "100x100"), writing the result to a fresh temp
// file which is returned.
//
// Fixes two defects in the original: the nil-image check ran after the
// temp file was created and returned (nil, nil) because err was nil at
// that point, and cmd.Run()'s error was silently discarded.
func (step ResizeStep) resize(image *os.File, format string) (*os.File, error) {
	if image == nil {
		// Previously returned (nil, nil) here, masking the failure.
		return nil, os.ErrInvalid
	}
	binary, err := exec.LookPath("convert")
	if err != nil {
		return nil, err
	}
	tempPrefix := "godragonfly" + format
	resized, err := ioutil.TempFile(os.TempDir(), tempPrefix)
	if err != nil {
		return nil, err
	}
	args := []string{
		image.Name(),
		"-resize", format,
		resized.Name(),
	}
	// Propagate convert's exit status; previously it was dropped.
	if err := exec.Command(binary, args...).Run(); err != nil {
		return resized, err
	}
	return resized, nil
}
// fechFile copies the named file into a fresh temp file and returns the
// temp file (still open, positioned at end). On write failure the temp
// file is closed and removed instead of being leaked.
// NOTE(review): "fechFile" is a typo for "fetchFile"; renaming would
// touch callers, so it is only flagged here.
func fechFile(filename string) (*os.File, error) {
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	temp, err := ioutil.TempFile(os.TempDir(), "godragonfly")
	if err != nil {
		return nil, err
	}
	if _, err := temp.Write(content); err != nil {
		// Don't leak the half-written temp file.
		temp.Close()
		os.Remove(temp.Name())
		return nil, err
	}
	return temp, nil
}
refactor image magick convert
package dragonfly
import (
//"fmt"
"io/ioutil"
"os"
"os/exec"
)
type Step interface {
Apply(in chan *os.File, errIn chan error) (out chan *os.File, errChan chan error)
//Args []string
//Command string
}
type Job struct {
Steps []Step
Temp *os.File
}
func (job *Job) Apply() (temp *os.File, err error) {
tail := make(chan *os.File)
errChan := make(chan error)
close(tail)
defer close(errChan)
for _, step := range job.Steps {
tail, errChan = step.Apply(tail, errChan)
}
select {
case err = <-errChan:
case temp = <-tail:
}
return
}
type stepApplication func(temp *os.File) (*os.File, error)
func applyStepPipeline(in chan *os.File, errIn chan error, step stepApplication) (out chan *os.File, errChan chan error) {
out = make(chan *os.File)
errChan = make(chan error)
go func() {
defer close(out)
defer close(errChan)
var (
err error
content *os.File
)
select {
case prev := <-in:
content, err = step(prev)
case err = <-errIn:
}
if err != nil {
errChan <- err
return
}
out <- content
}()
return out, errChan
}
type FetchFileStep struct {
Args []string
Command string
}
type ResizeStep struct {
Args []string
Command string
}
func (step ResizeStep) Apply(in chan *os.File, errIn chan error) (out chan *os.File, errChan chan error) {
return applyStepPipeline(in, errIn, func(temp *os.File) (newTemp *os.File, err error) {
format := step.Args[1]
return step.resize(temp, format)
})
}
func (step FetchFileStep) Apply(in chan *os.File, errIn chan error) (out chan *os.File, errChan chan error) {
return applyStepPipeline(in, errIn, func(_ *os.File) (temp *os.File, err error) {
filename := step.Args[0]
return fechFile(filename)
//return nil, errors.New("please don't stop the music")
})
}
// resize shells out to ImageMagick's convert with "-resize <format>".
func (step ResizeStep) resize(image *os.File, format string) (*os.File, error) {
return shellConvert(image, "-resize", format)
}
// shellConvert runs ImageMagick's convert with in as the source, the
// given extra arguments, and a fresh temp file as the destination. The
// temp file is returned together with convert's exit error, if any.
// NOTE(review): a nil in panics on in.Name() — callers must guard.
func shellConvert(in *os.File, args ...string) (out *os.File, err error) {
	var binary string
	if binary, err = exec.LookPath("convert"); err != nil {
		return nil, err
	}
	if out, err = newTempfile(); err != nil {
		return nil, err
	}
	cmdArgs := make([]string, 0, len(args)+2)
	cmdArgs = append(cmdArgs, in.Name())
	cmdArgs = append(cmdArgs, args...)
	cmdArgs = append(cmdArgs, out.Name())
	err = exec.Command(binary, cmdArgs...).Run()
	return out, err
}
// newTempfile creates an empty temp file in the system temp directory
// using the package-wide "godragonfly" name prefix.
func newTempfile() (*os.File, error) {
	return ioutil.TempFile(os.TempDir(), "godragonfly")
}
// fechFile copies the named file into a fresh temp file and returns the
// temp file (still open, positioned at end). On write failure the temp
// file is closed and removed instead of being leaked.
// NOTE(review): "fechFile" is a typo for "fetchFile"; renaming would
// touch callers, so it is only flagged here.
func fechFile(filename string) (*os.File, error) {
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	temp, err := ioutil.TempFile(os.TempDir(), "godragonfly")
	if err != nil {
		return nil, err
	}
	if _, err := temp.Write(content); err != nil {
		// Don't leak the half-written temp file.
		temp.Close()
		os.Remove(temp.Name())
		return nil, err
	}
	return temp, nil
}
|
package securepass
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
)
const (
// DefaultRemote is the default SecurePass service endpoint URL
// (the original comment was a copy-paste of the Content-Type one).
DefaultRemote = "https://beta.secure-pass.net"
// ContentType is the default Content-Type header used in HTTP requests
ContentType = "application/json"
// UserAgent contains the default User-Agent value used in HTTP requests
UserAgent = "SecurePass CLI"
)
// SecurePass holds the application credentials and endpoint used for
// every API call. The ini tags map the credential fields to config keys.
type SecurePass struct {
AppID string `ini:"APP_ID"`
AppSecret string `ini:"APP_SECRET"`
Endpoint string
}
// NewSecurePass makes and initializes a new SecurePass instance.
// remote must be an absolute https:// URL; otherwise an error is returned.
func NewSecurePass(appid, appsecret, remote string) (*SecurePass, error) {
u, err := url.Parse(remote)
if err != nil {
return nil, err
}
if !u.IsAbs() {
return nil, fmt.Errorf("'%s' is not an absolute URL", remote)
}
if u.Scheme != "https" {
return nil, fmt.Errorf("scheme of '%s' isn't 'https'", remote)
}
return &SecurePass{
AppID: appid,
AppSecret: appsecret,
Endpoint: remote}, nil
}
// setupRequestFieds stamps the standard SecurePass headers (content
// negotiation, user agent, and app credentials) onto req.
// NOTE(review): the name is a typo for setupRequestFields; renaming
// would touch every caller, so it is only flagged here.
func (s *SecurePass) setupRequestFieds(req *http.Request) {
req.Header.Set("Accept", ContentType)
req.Header.Set("Content-Type", ContentType)
req.Header.Set("User-Agent", UserAgent)
req.Header.Set("X-SecurePass-App-ID", s.AppID)
req.Header.Set("X-SecurePass-App-Secret", s.AppSecret)
}
// makeRequestURL resolves path against the configured endpoint and
// returns the absolute request URL.
func (s *SecurePass) makeRequestURL(path string) (string, error) {
	baseURL, err := url.Parse(s.Endpoint)
	if err != nil {
		// Previously discarded with _; a malformed Endpoint now
		// surfaces as an error instead of resolving against nil.
		return "", err
	}
	URL, err := url.Parse(path)
	if err != nil {
		return "", err
	}
	return baseURL.ResolveReference(URL).String(), nil
}
// NewRequest initializes an HTTP request for the SecurePass endpoint and
// stamps it with the standard headers. buf may be nil for body-less
// requests; the explicit nil branch avoids wrapping a nil *bytes.Buffer
// in a non-nil io.Reader interface value.
func (s *SecurePass) NewRequest(method, path string, buf *bytes.Buffer) (*http.Request, error) {
var err error
var req *http.Request
URL, err := s.makeRequestURL(path)
if err != nil {
return nil, err
}
if buf != nil {
req, err = http.NewRequest(method, URL, buf)
} else {
req, err = http.NewRequest(method, URL, nil)
}
if err != nil {
return nil, err
}
s.setupRequestFieds(req)
return req, nil
}
// NewClient returns an *http.Client that uses tr as its transport. When
// tr is nil, a default transport is substituted which skips TLS
// certificate verification (SECURITY: InsecureSkipVerify exposes
// connections to man-in-the-middle attacks; consider making it opt-in).
func NewClient(tr *http.Transport) *http.Client {
	if tr == nil {
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	return &http.Client{Transport: tr}
}
// AppInfo retrieves information on a SecurePass application via
// /api/v1/apps/info. An empty app queries the calling application
// itself (APP_ID is omitted from the form).
func (s *SecurePass) AppInfo(app string) (*AppInfoResponse, error) {
	var obj AppInfoResponse
	client := NewClient(nil)
	data := url.Values{}
	if app != "" {
		data.Set("APP_ID", app)
	}
	req, err := s.NewRequest("POST", "/api/v1/apps/info", bytes.NewBufferString(data.Encode()))
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path; previously it leaked on non-200
	// because the defer came after the status check.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("%s", resp.Status)
	}
	err = json.NewDecoder(resp.Body).Decode(&obj)
	if err != nil {
		return nil, err
	}
	// Non-zero RC is an application-level failure.
	if obj.RC != 0 {
		return &obj, fmt.Errorf("%v", obj.ErrorMsg)
	}
	return &obj, nil
}
// Ping represents the /api/v1/ping API call.
func (s *SecurePass) Ping() (*PingResponse, error) {
	var obj PingResponse
	client := NewClient(nil)
	req, err := s.NewRequest("GET", "/api/v1/ping", nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path; previously it leaked on non-200
	// because the defer came after the status check.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("%s", resp.Status)
	}
	err = json.NewDecoder(resp.Body).Decode(&obj)
	if err != nil {
		return nil, err
	}
	// Non-zero RC is an application-level failure.
	if obj.RC != 0 {
		return &obj, fmt.Errorf("%v", obj.ErrorMsg)
	}
	return &obj, nil
}
Methods refactoring
Introduce new DoRequest method which contains the logic to perform an API call.
package securepass
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
)
const (
// DefaultRemote is the default Content-Type header used in HTTP requests
DefaultRemote = "https://beta.secure-pass.net"
// ContentType is the default Content-Type header used in HTTP requests
ContentType = "application/json"
// UserAgent contains the default User-Agent value used in HTTP requests
UserAgent = "SecurePass CLI"
)
// SecurePass main object type
type SecurePass struct {
AppID string `ini:"APP_ID"`
AppSecret string `ini:"APP_SECRET"`
Endpoint string
}
// NewSecurePass makes and initialize a new SecurePass instance
func NewSecurePass(appid, appsecret, remote string) (*SecurePass, error) {
u, err := url.Parse(remote)
if err != nil {
return nil, err
}
if !u.IsAbs() {
return nil, fmt.Errorf("'%s' is not an absolute URL", remote)
}
if u.Scheme != "https" {
return nil, fmt.Errorf("scheme of '%s' isn't 'https'", remote)
}
return &SecurePass{
AppID: appid,
AppSecret: appsecret,
Endpoint: remote}, nil
}
func (s *SecurePass) setupRequestFieds(req *http.Request) {
req.Header.Set("Accept", ContentType)
req.Header.Set("Content-Type", ContentType)
req.Header.Set("User-Agent", UserAgent)
req.Header.Set("X-SecurePass-App-ID", s.AppID)
req.Header.Set("X-SecurePass-App-Secret", s.AppSecret)
}
// makeRequestURL resolves path against the configured endpoint and
// returns the absolute request URL.
func (s *SecurePass) makeRequestURL(path string) (string, error) {
	baseURL, err := url.Parse(s.Endpoint)
	if err != nil {
		// Previously discarded with _; a malformed Endpoint now
		// surfaces as an error instead of resolving against nil.
		return "", err
	}
	URL, err := url.Parse(path)
	if err != nil {
		return "", err
	}
	return baseURL.ResolveReference(URL).String(), nil
}
// NewRequest initializes and issues an HTTP request to the SecurePass endpoint
func (s *SecurePass) NewRequest(method, path string, buf *bytes.Buffer) (*http.Request, error) {
var err error
var req *http.Request
URL, err := s.makeRequestURL(path)
if err != nil {
return nil, err
}
if buf != nil {
req, err = http.NewRequest(method, URL, buf)
} else {
req, err = http.NewRequest(method, URL, nil)
}
if err != nil {
return nil, err
}
s.setupRequestFieds(req)
return req, nil
}
// DoRequest issues req, checks the HTTP status against expstatus,
// decodes the JSON body into obj, and maps an application-level failure
// (non-zero error code) to a Go error.
func (s *SecurePass) DoRequest(req *http.Request, obj APIResponse, expstatus int) error {
	client := NewClient(nil)
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	// Close the body on every path; previously it leaked when the
	// status code did not match because the defer came afterwards.
	defer resp.Body.Close()
	if resp.StatusCode != expstatus {
		return fmt.Errorf("%s", resp.Status)
	}
	if err := json.NewDecoder(resp.Body).Decode(obj); err != nil {
		return err
	}
	if obj.ErrorCode() != 0 {
		return fmt.Errorf("%d: %s", obj.ErrorCode(), obj.ErrorMessage())
	}
	return nil
}
// NewClient returns an *http.Client that uses tr as its transport. When
// tr is nil, a default transport is substituted which skips TLS
// certificate verification (SECURITY: InsecureSkipVerify exposes
// connections to man-in-the-middle attacks; consider making it opt-in).
func NewClient(tr *http.Transport) *http.Client {
	if tr == nil {
		tr = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	return &http.Client{Transport: tr}
}
// AppInfo retrieves information on a SecurePass application
// via /api/v1/apps/info. An empty app queries the calling application
// itself (APP_ID is omitted from the form).
func (s *SecurePass) AppInfo(app string) (*AppInfoResponse, error) {
var obj AppInfoResponse
data := url.Values{}
if app != "" {
data.Set("APP_ID", app)
}
req, err := s.NewRequest("POST", "/api/v1/apps/info", bytes.NewBufferString(data.Encode()))
if err != nil {
return nil, err
}
err = s.DoRequest(req, &obj, 200)
return &obj, err
}
// AppAdd registers a new application via /api/v1/apps/add (the original
// comment wrongly referred to AppInfo). Optional descriptor fields are
// only sent when non-empty.
func (s *SecurePass) AppAdd(app *ApplicationDescriptor) (*AppdAddResponse, error) {
var obj AppdAddResponse
data := url.Values{}
data.Set("LABEL", app.Label)
data.Set("WRITE", fmt.Sprintf("%v", app.Write))
data.Set("PRIVACY", fmt.Sprintf("%v", app.Privacy))
if app.AllowNetworkIPv4 != "" {
data.Set("ALLOW_NETWORK_IPv4", app.AllowNetworkIPv4)
}
if app.AllowNetworkIPv6 != "" {
data.Set("ALLOW_NETWORK_IPv6", app.AllowNetworkIPv6)
}
if app.Group != "" {
data.Set("GROUP", app.Group)
}
if app.Realm != "" {
data.Set("REALM", app.Realm)
}
req, err := s.NewRequest("POST", "/api/v1/apps/add", bytes.NewBufferString(data.Encode()))
if err != nil {
return nil, err
}
err = s.DoRequest(req, &obj, 200)
return &obj, err
}
// Ping represents the /api/v1/ping API call.
func (s *SecurePass) Ping() (*PingResponse, error) {
var obj PingResponse
req, err := s.NewRequest("GET", "/api/v1/ping", nil)
if err != nil {
return nil, err
}
err = s.DoRequest(req, &obj, 200)
return &obj, err
}
|
package storage
import (
"errors"
"fmt"
"io"
"path"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
var (
// errResumableDigestNotAvailable signals that resumable hash state is
// disabled or unsupported; callers treat it as "re-hash from scratch".
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
// blobWriter is used to control the various aspects of resumable
// blob upload (the original comment's "layerWriter"/"LayerUpload" names
// are stale). It implements the distribution.BlobWriter interface.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
id string
startedAt time.Time
digester digest.Digester
written int64 // track the contiguous write
fileWriter storagedriver.FileWriter
driver storagedriver.StorageDriver
path string
resumableDigestEnabled bool
}
var _ distribution.BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
return bw.id
}
// StartedAt reports when this upload session began.
func (bw *blobWriter) StartedAt() time.Time {
return bw.startedAt
}
// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	context.GetLogger(ctx).Debug("(*blobWriter).Commit")
	if err := bw.fileWriter.Commit(); err != nil {
		return distribution.Descriptor{}, err
	}
	// Persist the resumable hash state and close the backing writer
	// before validating; without this, validateBlob could observe stale
	// digest state. (This mirrors the later upstream fix adding Close
	// here; its error is intentionally not fatal to the commit.)
	bw.Close()
	canonical, err := bw.validateBlob(ctx, desc)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if err := bw.moveBlob(ctx, canonical); err != nil {
		return distribution.Descriptor{}, err
	}
	if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
		return distribution.Descriptor{}, err
	}
	if err := bw.removeResources(ctx); err != nil {
		return distribution.Descriptor{}, err
	}
	err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return canonical, nil
}
// Cancel rolls back the blob upload process, releasing any resources
// associated with the writer and canceling the operation.
// NOTE(review): the error from bw.Close() is dropped; likely acceptable
// while discarding the upload, but confirm.
func (bw *blobWriter) Cancel(ctx context.Context) error {
context.GetLogger(ctx).Debug("(*blobWriter).Rollback")
if err := bw.fileWriter.Cancel(); err != nil {
return err
}
if err := bw.removeResources(ctx); err != nil {
return err
}
bw.Close()
return nil
}
// Size reports how many bytes the underlying file writer has accepted.
func (bw *blobWriter) Size() int64 {
return bw.fileWriter.Size()
}
// Write appends p to both the backing file and the running digest,
// tracking the contiguous byte count in bw.written.
func (bw *blobWriter) Write(p []byte) (int, error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p)
bw.written += int64(n)
return n, err
}
// ReadFrom streams r into both the backing file and the running digest,
// tracking the contiguous byte count in bw.written.
func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r)
bw.written += nn
return nn, err
}
// Close persists the resumable hash state and then closes the backing
// file writer. It neither commits nor cancels the upload.
func (bw *blobWriter) Close() error {
if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
return err
}
return bw.fileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
var (
verified, fullHash bool
canonical digest.Digest
)
if desc.Digest == "" {
// if no descriptors are provided, we have nothing to validate
// against. We don't really want to support this for the registry.
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Reason: fmt.Errorf("cannot validate against empty digest"),
}
}
var size int64
// Stat the on disk file
if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume
// that the desc length is zero.
desc.Size = 0
default:
// Any other error we want propagated up the stack.
return distribution.Descriptor{}, err
}
} else {
if fi.IsDir() {
return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
}
size = fi.Size()
}
// Reconcile the caller-claimed size with what is actually on disk.
if desc.Size > 0 {
if desc.Size != size {
return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
}
} else {
// if provided 0 or negative length, we can assume caller doesn't know or
// care about length.
desc.Size = size
}
// TODO(stevvooe): This section is very meandering. Need to be broken down
// to be a lot more clear.
if err := bw.resumeDigest(ctx); err == nil {
canonical = bw.digester.Digest()
if canonical.Algorithm() == desc.Digest.Algorithm() {
// Common case: client and server prefer the same canonical digest
// algorithm - currently SHA256.
verified = desc.Digest == canonical
} else {
// The client wants to use a different digest algorithm. They'll just
// have to be patient and wait for us to download and re-hash the
// uploaded content using that digest algorithm.
fullHash = true
}
} else if err == errResumableDigestNotAvailable {
// Not using resumable digests, so we need to hash the entire layer.
fullHash = true
} else {
return distribution.Descriptor{}, err
}
if fullHash {
// a fantastic optimization: if the the written data and the size are
// the same, we don't need to read the data from the backend. This is
// because we've written the entire file in the lifecycle of the
// current instance.
if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
canonical = bw.digester.Digest()
verified = desc.Digest == canonical
}
// If the check based on size fails, we fall back to the slowest of
// paths. We may be able to make the size-based check a stronger
// guarantee, so this may be defensive.
if !verified {
digester := digest.Canonical.New()
digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
if err != nil {
return distribution.Descriptor{}, err
}
// Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
if err != nil {
return distribution.Descriptor{}, err
}
defer fr.Close()
tr := io.TeeReader(fr, digester.Hash())
if _, err := io.Copy(digestVerifier, tr); err != nil {
return distribution.Descriptor{}, err
}
canonical = digester.Digest()
verified = digestVerifier.Verified()
}
}
if !verified {
// NOTE(review): the log message below reads "does match" but fires on
// a MISmatch; fix the wording in a code change, not here.
context.GetLoggerWithFields(ctx,
map[interface{}]interface{}{
"canonical": canonical,
"provided": desc.Digest,
}, "canonical", "provided").
Errorf("canonical digest does match provided digest")
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Digest: desc.Digest,
Reason: fmt.Errorf("content does not match digest"),
}
}
// update desc with canonical hash
desc.Digest = canonical
if desc.MediaType == "" {
desc.MediaType = "application/octet-stream"
}
return desc, nil
}
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
if err != nil {
return err
}
// Check for existence
if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // ensure that it doesn't exist.
default:
return err
}
} else {
// If the path exists, we can assume that the content has already
// been uploaded, since the blob storage is content-addressable.
// While it may be corrupted, detection of such corruption belongs
// elsewhere.
return nil
}
// If no data was received, we may not actually have a file on disk. Check
// the size here and write a zero-length file to blobPath if this is the
// case. For the most part, this should only ever happen with zero-length
// tars.
if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// HACK(stevvooe): This is slightly dangerous: if we verify above,
// get a hash, then the underlying file is deleted, we risk moving
// a zero-length blob into a nonzero-length blob location. To
// prevent this horrid thing, we employ the hack of only allowing
// to this happen for the digest of an empty tar.
if desc.Digest == digest.DigestSha256EmptyTar {
return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
}
// We let this fail during the move below.
logrus.
WithField("upload.id", bw.ID()).
WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
default:
return err // unrelated error
}
}
// TODO(stevvooe): We should also write the mediatype when executing this move.
return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
dataPath, err := pathFor(uploadDataPathSpec{
name: bw.blobStore.repository.Named().Name(),
id: bw.id,
})
if err != nil {
return err
}
// Resolve and delete the containing directory, which should include any
// upload related files.
dirPath := path.Dir(dataPath)
if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // already gone!
default:
// This should be uncommon enough such that returning an error
// should be okay. At this point, the upload should be mostly
// complete, but perhaps the backend became unaccessible.
context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
return err
}
}
return nil
}
// Reader returns a ReadCloser over the uploaded data, polling the
// backend up to five times (one second apart) for the file to appear.
// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
try := 1
for try <= 5 {
_, err := bw.driver.Stat(bw.ctx, bw.path)
if err == nil {
break
}
switch err.(type) {
case storagedriver.PathNotFoundError:
context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
time.Sleep(1 * time.Second)
try++
default:
return nil, err
}
}
// After five not-found attempts, fall through and let Reader surface
// the final error.
readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
if err != nil {
return nil, err
}
return readCloser, nil
}
Add blobWriter.Close() call into blobWriter.Commit()
Signed-off-by: Serge Dubrouski <e8d130908ca8eaa35d680cada55d2ee7b8cdf97e@gmail.com>
package storage
import (
"errors"
"fmt"
"io"
"path"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
var (
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
// layerWriter is used to control the various aspects of resumable
// layer upload. It implements the LayerUpload interface.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
id string
startedAt time.Time
digester digest.Digester
written int64 // track the contiguous write
fileWriter storagedriver.FileWriter
driver storagedriver.StorageDriver
path string
resumableDigestEnabled bool
}
var _ distribution.BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
return bw.id
}
func (bw *blobWriter) StartedAt() time.Time {
return bw.startedAt
}
// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
context.GetLogger(ctx).Debug("(*blobWriter).Commit")
if err := bw.fileWriter.Commit(); err != nil {
return distribution.Descriptor{}, err
}
// Persist hash state and close the backing writer before validation.
// NOTE(review): the error from bw.Close() is discarded — a failure to
// store hash state would go unnoticed here; confirm this is acceptable.
bw.Close()
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return distribution.Descriptor{}, err
}
if err := bw.moveBlob(ctx, canonical); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.removeResources(ctx); err != nil {
return distribution.Descriptor{}, err
}
err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
if err != nil {
return distribution.Descriptor{}, err
}
return canonical, nil
}
// Rollback the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
context.GetLogger(ctx).Debug("(*blobWriter).Rollback")
if err := bw.fileWriter.Cancel(); err != nil {
return err
}
if err := bw.removeResources(ctx); err != nil {
return err
}
bw.Close()
return nil
}
func (bw *blobWriter) Size() int64 {
return bw.fileWriter.Size()
}
func (bw *blobWriter) Write(p []byte) (int, error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p)
bw.written += int64(n)
return n, err
}
func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r)
bw.written += nn
return nn, err
}
func (bw *blobWriter) Close() error {
if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
return err
}
return bw.fileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
var (
verified, fullHash bool
canonical digest.Digest
)
if desc.Digest == "" {
// if no descriptors are provided, we have nothing to validate
// against. We don't really want to support this for the registry.
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Reason: fmt.Errorf("cannot validate against empty digest"),
}
}
var size int64
// Stat the on disk file
if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume
// that the desc length is zero.
desc.Size = 0
default:
// Any other error we want propagated up the stack.
return distribution.Descriptor{}, err
}
} else {
if fi.IsDir() {
return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
}
size = fi.Size()
}
if desc.Size > 0 {
if desc.Size != size {
return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
}
} else {
// if provided 0 or negative length, we can assume caller doesn't know or
// care about length.
desc.Size = size
}
// TODO(stevvooe): This section is very meandering. Need to be broken down
// to be a lot more clear.
if err := bw.resumeDigest(ctx); err == nil {
canonical = bw.digester.Digest()
if canonical.Algorithm() == desc.Digest.Algorithm() {
// Common case: client and server prefer the same canonical digest
// algorithm - currently SHA256.
verified = desc.Digest == canonical
} else {
// The client wants to use a different digest algorithm. They'll just
// have to be patient and wait for us to download and re-hash the
// uploaded content using that digest algorithm.
fullHash = true
}
} else if err == errResumableDigestNotAvailable {
// Not using resumable digests, so we need to hash the entire layer.
fullHash = true
} else {
return distribution.Descriptor{}, err
}
if fullHash {
// a fantastic optimization: if the the written data and the size are
// the same, we don't need to read the data from the backend. This is
// because we've written the entire file in the lifecycle of the
// current instance.
if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
canonical = bw.digester.Digest()
verified = desc.Digest == canonical
}
// If the check based on size fails, we fall back to the slowest of
// paths. We may be able to make the size-based check a stronger
// guarantee, so this may be defensive.
if !verified {
digester := digest.Canonical.New()
digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
if err != nil {
return distribution.Descriptor{}, err
}
// Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
if err != nil {
return distribution.Descriptor{}, err
}
defer fr.Close()
tr := io.TeeReader(fr, digester.Hash())
if _, err := io.Copy(digestVerifier, tr); err != nil {
return distribution.Descriptor{}, err
}
canonical = digester.Digest()
verified = digestVerifier.Verified()
}
}
if !verified {
context.GetLoggerWithFields(ctx,
map[interface{}]interface{}{
"canonical": canonical,
"provided": desc.Digest,
}, "canonical", "provided").
Errorf("canonical digest does match provided digest")
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Digest: desc.Digest,
Reason: fmt.Errorf("content does not match digest"),
}
}
// update desc with canonical hash
desc.Digest = canonical
if desc.MediaType == "" {
desc.MediaType = "application/octet-stream"
}
return desc, nil
}
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
    // Resolve the content-addressed destination path for the validated digest.
    blobPath, err := pathFor(blobDataPathSpec{
        digest: desc.Digest,
    })

    if err != nil {
        return err
    }

    // Check for existence
    if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
        switch err := err.(type) {
        case storagedriver.PathNotFoundError:
            break // ensure that it doesn't exist.
        default:
            // Any error other than "not found" from Stat is fatal here.
            return err
        }
    } else {
        // If the path exists, we can assume that the content has already
        // been uploaded, since the blob storage is content-addressable.
        // While it may be corrupted, detection of such corruption belongs
        // elsewhere.
        return nil
    }

    // If no data was received, we may not actually have a file on disk. Check
    // the size here and write a zero-length file to blobPath if this is the
    // case. For the most part, this should only ever happen with zero-length
    // tars.
    if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
        switch err := err.(type) {
        case storagedriver.PathNotFoundError:

            // HACK(stevvooe): This is slightly dangerous: if we verify above,
            // get a hash, then the underlying file is deleted, we risk moving
            // a zero-length blob into a nonzero-length blob location. To
            // prevent this horrid thing, we employ the hack of only allowing
            // to this happen for the digest of an empty tar.
            if desc.Digest == digest.DigestSha256EmptyTar {
                return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
            }

            // We let this fail during the move below.
            logrus.
                WithField("upload.id", bw.ID()).
                WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
        default:
            return err // unrelated error
        }
    }

    // TODO(stevvooe): We should also write the mediatype when executing this move.

    // Perform the actual rename into the content-addressed location.
    return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
    dataPath, err := pathFor(uploadDataPathSpec{
        name: bw.blobStore.repository.Named().Name(),
        id:   bw.id,
    })

    if err != nil {
        return err
    }

    // Resolve and delete the containing directory, which should include any
    // upload related files.
    dirPath := path.Dir(dataPath)
    if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
        switch err := err.(type) {
        case storagedriver.PathNotFoundError:
            break // already gone!
        default:
            // This should be uncommon enough such that returning an error
            // should be okay. At this point, the upload should be mostly
            // complete, but perhaps the backend became unaccessible.
            context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
            return err
        }
    }

    return nil
}
// Reader returns a ReadCloser over the upload's backing file, polling for up
// to five one-second tries for the path to appear — presumably because some
// storage backends are eventually consistent after a write (TODO confirm).
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
    // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
    try := 1
    for try <= 5 {
        _, err := bw.driver.Stat(bw.ctx, bw.path)
        if err == nil {
            break
        }
        switch err.(type) {
        case storagedriver.PathNotFoundError:
            context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
            time.Sleep(1 * time.Second)
            try++
        default:
            return nil, err
        }
    }

    // If the path never appeared we still attempt the read and let the
    // driver surface the failure.
    readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
    if err != nil {
        return nil, err
    }

    return readCloser, nil
}
|
package plans
import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/states"
"github.com/zclconf/go-cty/cty"
)
// Changes describes various actions that Terraform will attempt to take if
// the corresponding plan is applied.
//
// A Changes object can be rendered into a visual diff (by the caller, using
// code in another package) for display to the user.
type Changes struct {
    // Resources tracks planned changes to resource instance objects.
    Resources []*ResourceInstanceChangeSrc

    // Outputs tracks planned changes to output values.
    //
    // Note that although an in-memory plan contains planned changes for
    // outputs throughout the configuration, a plan serialized
    // to disk retains only the root outputs because they are
    // externally-visible, while other outputs are implementation details and
    // can be easily re-calculated during the apply phase. Therefore only root
    // module outputs will survive a round-trip through a plan file.
    Outputs []*OutputChangeSrc
}

// NewChanges returns a valid Changes object that describes no changes.
func NewChanges() *Changes {
    return &Changes{}
}

// Empty reports whether the plan contains no effective resource changes,
// i.e. every tracked resource change is a NoOp.
//
// NOTE(review): only Resources are consulted; a plan whose only changes are
// to output values is reported as empty — confirm this is intended.
func (c *Changes) Empty() bool {
    for _, res := range c.Resources {
        if res.Action != NoOp {
            return false
        }
    }
    return true
}
// ResourceInstance returns the planned change for the current object of the
// resource instance of the given address, if any. Returns nil if no change is
// planned.
//
// The lookup is a linear scan over all tracked resource changes.
func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc {
    addrStr := addr.String()
    for _, rc := range c.Resources {
        if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed {
            return rc
        }
    }

    return nil
}

// ResourceInstanceDeposed returns the plan change of a deposed object of
// the resource instance of the given address, if any. Returns nil if no change
// is planned.
func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc {
    addrStr := addr.String()
    for _, rc := range c.Resources {
        if rc.Addr.String() == addrStr && rc.DeposedKey == key {
            return rc
        }
    }

    return nil
}

// OutputValue returns the planned change for the output value with the
// given address, if any. Returns nil if no change is planned.
func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc {
    addrStr := addr.String()
    for _, oc := range c.Outputs {
        if oc.Addr.String() == addrStr {
            return oc
        }
    }

    return nil
}

// SyncWrapper returns a wrapper object around the receiver that can be used
// to make certain changes to the receiver in a concurrency-safe way, as long
// as all callers share the same wrapper object.
func (c *Changes) SyncWrapper() *ChangesSync {
    return &ChangesSync{
        changes: c,
    }
}
// ResourceInstanceChange describes a change to a particular resource instance
// object.
type ResourceInstanceChange struct {
    // Addr is the absolute address of the resource instance that the change
    // will apply to.
    Addr addrs.AbsResourceInstance

    // DeposedKey is the identifier for a deposed object associated with the
    // given instance, or states.NotDeposed if this change applies to the
    // current object.
    //
    // A Replace change for a resource with create_before_destroy set will
    // create a new DeposedKey temporarily during replacement. In that case,
    // DeposedKey in the plan is always states.NotDeposed, representing that
    // the current object is being replaced with the deposed.
    DeposedKey states.DeposedKey

    // ProviderAddr is the address of the provider configuration that was used
    // to plan this change, and thus the configuration that must also be
    // used to apply it.
    ProviderAddr addrs.AbsProviderConfig

    // Change is an embedded description of the change.
    Change

    // RequiredReplace is a set of paths that caused the change action to be
    // Replace rather than Update. Always nil if the change action is not
    // Replace.
    //
    // This is retained only for UI-plan-rendering purposes and so it does not
    // currently survive a round-trip through a saved plan file.
    RequiredReplace cty.PathSet

    // Private allows a provider to stash any extra data that is opaque to
    // Terraform that relates to this change. Terraform will save this
    // byte-for-byte and return it to the provider in the apply call.
    Private []byte
}
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Pass the implied type of the
// corresponding resource type schema for correct operation.
func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) {
	cs, err := rc.Change.Encode(ty)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here, so return an explicit nil rather than
	// re-returning the checked error variable.
	return &ResourceInstanceChangeSrc{
		Addr:            rc.Addr,
		DeposedKey:      rc.DeposedKey,
		ProviderAddr:    rc.ProviderAddr,
		ChangeSrc:       *cs,
		RequiredReplace: rc.RequiredReplace,
		Private:         rc.Private,
	}, nil
}
// Simplify will, where possible, produce a change with a simpler action than
// the receiver given a flag indicating whether the caller is dealing with
// a normal apply or a destroy. This flag deals with the fact that Terraform
// Core uses a specialized graph node type for destroying; only that
// specialized node should set "destroying" to true.
//
// The following table shows the simplification behavior:
//
//     Action    Destroying?   New Action
//     --------+-------------+-----------
//     Create    true          NoOp
//     Delete    false         NoOp
//     Replace   true          Delete
//     Replace   false         Create
//
// For any combination not in the above table, the Simplify just returns the
// receiver as-is.
func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange {
    if destroying {
        switch rc.Action {
        case Delete:
            // We'll fall out and just return rc verbatim, then.
        case CreateThenDelete, DeleteThenCreate:
            // Replace during destroy simplifies to a plain Delete of the
            // prior object.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: Delete,
                    Before: rc.Before,
                    After:  cty.NullVal(rc.Before.Type()),
                },
            }
        default:
            // Anything else (Create, Update, ...) is a NoOp during destroy.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: NoOp,
                    Before: rc.Before,
                    After:  rc.Before,
                },
            }
        }
    } else {
        switch rc.Action {
        case Delete:
            // Delete during a normal apply is a NoOp.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: NoOp,
                    Before: rc.Before,
                    After:  rc.Before,
                },
            }
        case CreateThenDelete, DeleteThenCreate:
            // Replace during a normal apply simplifies to a Create of the
            // new object.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: Create,
                    Before: cty.NullVal(rc.After.Type()),
                    After:  rc.After,
                },
            }
        }
    }

    // If we fall out here then our change is already simple enough.
    return rc
}
// OutputChange describes a change to an output value.
type OutputChange struct {
    // Addr is the absolute address of the output value that the change
    // will apply to.
    Addr addrs.AbsOutputValue

    // Change is an embedded description of the change.
    //
    // For output value changes, the type constraint for the DynamicValue
    // instances is always cty.DynamicPseudoType.
    Change

    // Sensitive, if true, indicates that either the old or new value in the
    // change is sensitive and so a rendered version of the plan in the UI
    // should elide the actual values while still indicating the action of the
    // change.
    Sensitive bool
}
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file.
func (oc *OutputChange) Encode() (*OutputChangeSrc, error) {
	cs, err := oc.Change.Encode(cty.DynamicPseudoType)
	if err != nil {
		return nil, err
	}
	// Addr must be copied through, otherwise the output's address is lost
	// on a round-trip through a saved plan file and Changes.OutputValue can
	// no longer find the change.
	return &OutputChangeSrc{
		Addr:      oc.Addr,
		ChangeSrc: *cs,
		Sensitive: oc.Sensitive,
	}, nil
}
// Change describes a single change with a given action.
type Change struct {
    // Action defines what kind of change is being made.
    Action Action

    // Interpretation of Before and After depend on Action:
    //
    //     NoOp     Before and After are the same, unchanged value
    //     Create   Before is nil, and After is the expected value after create.
    //     Read     Before is any prior value (nil if no prior), and After is the
    //              value that was or will be read.
    //     Update   Before is the value prior to update, and After is the expected
    //              value after update.
    //     Replace  As with Update.
    //     Delete   Before is the value prior to delete, and After is always nil.
    //
    // Unknown values may appear anywhere within the Before and After values,
    // either as the values themselves or as nested elements within known
    // collections/structures.
    Before, After cty.Value
}
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Pass the type constraint
// that the values are expected to conform to; to properly decode the values
// later an identical type constraint must be provided at that time.
//
// Where a Change is embedded in some other struct, it's generally better
// to call the corresponding Encode method of that struct rather than working
// directly with its embedded Change.
func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) {
    // Serialize both values against the caller-supplied type constraint.
    beforeDV, err := NewDynamicValue(c.Before, ty)
    if err != nil {
        return nil, err
    }
    afterDV, err := NewDynamicValue(c.After, ty)
    if err != nil {
        return nil, err
    }

    return &ChangeSrc{
        Action: c.Action,
        Before: beforeDV,
        After:  afterDV,
    }, nil
}
plans: OutputChange.Encode must preserve Addr field
package plans
import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/states"
"github.com/zclconf/go-cty/cty"
)
// Changes describes various actions that Terraform will attempt to take if
// the corresponding plan is applied.
//
// A Changes object can be rendered into a visual diff (by the caller, using
// code in another package) for display to the user.
type Changes struct {
    // Resources tracks planned changes to resource instance objects.
    Resources []*ResourceInstanceChangeSrc

    // Outputs tracks planned changes to output values.
    //
    // Note that although an in-memory plan contains planned changes for
    // outputs throughout the configuration, a plan serialized
    // to disk retains only the root outputs because they are
    // externally-visible, while other outputs are implementation details and
    // can be easily re-calculated during the apply phase. Therefore only root
    // module outputs will survive a round-trip through a plan file.
    Outputs []*OutputChangeSrc
}

// NewChanges returns a valid Changes object that describes no changes.
func NewChanges() *Changes {
    return &Changes{}
}

// Empty reports whether the plan contains no effective resource changes,
// i.e. every tracked resource change is a NoOp.
//
// NOTE(review): only Resources are consulted; a plan whose only changes are
// to output values is reported as empty — confirm this is intended.
func (c *Changes) Empty() bool {
    for _, res := range c.Resources {
        if res.Action != NoOp {
            return false
        }
    }
    return true
}
// ResourceInstance returns the planned change for the current object of the
// resource instance of the given address, if any. Returns nil if no change is
// planned.
//
// The lookup is a linear scan over all tracked resource changes.
func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc {
    addrStr := addr.String()
    for _, rc := range c.Resources {
        if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed {
            return rc
        }
    }

    return nil
}

// ResourceInstanceDeposed returns the plan change of a deposed object of
// the resource instance of the given address, if any. Returns nil if no change
// is planned.
func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc {
    addrStr := addr.String()
    for _, rc := range c.Resources {
        if rc.Addr.String() == addrStr && rc.DeposedKey == key {
            return rc
        }
    }

    return nil
}

// OutputValue returns the planned change for the output value with the
// given address, if any. Returns nil if no change is planned.
func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc {
    addrStr := addr.String()
    for _, oc := range c.Outputs {
        if oc.Addr.String() == addrStr {
            return oc
        }
    }

    return nil
}

// SyncWrapper returns a wrapper object around the receiver that can be used
// to make certain changes to the receiver in a concurrency-safe way, as long
// as all callers share the same wrapper object.
func (c *Changes) SyncWrapper() *ChangesSync {
    return &ChangesSync{
        changes: c,
    }
}
// ResourceInstanceChange describes a change to a particular resource instance
// object.
type ResourceInstanceChange struct {
    // Addr is the absolute address of the resource instance that the change
    // will apply to.
    Addr addrs.AbsResourceInstance

    // DeposedKey is the identifier for a deposed object associated with the
    // given instance, or states.NotDeposed if this change applies to the
    // current object.
    //
    // A Replace change for a resource with create_before_destroy set will
    // create a new DeposedKey temporarily during replacement. In that case,
    // DeposedKey in the plan is always states.NotDeposed, representing that
    // the current object is being replaced with the deposed.
    DeposedKey states.DeposedKey

    // ProviderAddr is the address of the provider configuration that was used
    // to plan this change, and thus the configuration that must also be
    // used to apply it.
    ProviderAddr addrs.AbsProviderConfig

    // Change is an embedded description of the change.
    Change

    // RequiredReplace is a set of paths that caused the change action to be
    // Replace rather than Update. Always nil if the change action is not
    // Replace.
    //
    // This is retained only for UI-plan-rendering purposes and so it does not
    // currently survive a round-trip through a saved plan file.
    RequiredReplace cty.PathSet

    // Private allows a provider to stash any extra data that is opaque to
    // Terraform that relates to this change. Terraform will save this
    // byte-for-byte and return it to the provider in the apply call.
    Private []byte
}
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Pass the implied type of the
// corresponding resource type schema for correct operation.
func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) {
    cs, err := rc.Change.Encode(ty)
    if err != nil {
        return nil, err
    }
    // err is necessarily nil at this point.
    return &ResourceInstanceChangeSrc{
        Addr:            rc.Addr,
        DeposedKey:      rc.DeposedKey,
        ProviderAddr:    rc.ProviderAddr,
        ChangeSrc:       *cs,
        RequiredReplace: rc.RequiredReplace,
        Private:         rc.Private,
    }, err
}
// Simplify will, where possible, produce a change with a simpler action than
// the receiver given a flag indicating whether the caller is dealing with
// a normal apply or a destroy. This flag deals with the fact that Terraform
// Core uses a specialized graph node type for destroying; only that
// specialized node should set "destroying" to true.
//
// The following table shows the simplification behavior:
//
//     Action    Destroying?   New Action
//     --------+-------------+-----------
//     Create    true          NoOp
//     Delete    false         NoOp
//     Replace   true          Delete
//     Replace   false         Create
//
// For any combination not in the above table, the Simplify just returns the
// receiver as-is.
func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange {
    if destroying {
        switch rc.Action {
        case Delete:
            // We'll fall out and just return rc verbatim, then.
        case CreateThenDelete, DeleteThenCreate:
            // Replace during destroy simplifies to a plain Delete of the
            // prior object.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: Delete,
                    Before: rc.Before,
                    After:  cty.NullVal(rc.Before.Type()),
                },
            }
        default:
            // Anything else (Create, Update, ...) is a NoOp during destroy.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: NoOp,
                    Before: rc.Before,
                    After:  rc.Before,
                },
            }
        }
    } else {
        switch rc.Action {
        case Delete:
            // Delete during a normal apply is a NoOp.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: NoOp,
                    Before: rc.Before,
                    After:  rc.Before,
                },
            }
        case CreateThenDelete, DeleteThenCreate:
            // Replace during a normal apply simplifies to a Create of the
            // new object.
            return &ResourceInstanceChange{
                Addr:         rc.Addr,
                DeposedKey:   rc.DeposedKey,
                Private:      rc.Private,
                ProviderAddr: rc.ProviderAddr,
                Change: Change{
                    Action: Create,
                    Before: cty.NullVal(rc.After.Type()),
                    After:  rc.After,
                },
            }
        }
    }

    // If we fall out here then our change is already simple enough.
    return rc
}
// OutputChange describes a change to an output value.
type OutputChange struct {
    // Addr is the absolute address of the output value that the change
    // will apply to.
    Addr addrs.AbsOutputValue

    // Change is an embedded description of the change.
    //
    // For output value changes, the type constraint for the DynamicValue
    // instances is always cty.DynamicPseudoType.
    Change

    // Sensitive, if true, indicates that either the old or new value in the
    // change is sensitive and so a rendered version of the plan in the UI
    // should elide the actual values while still indicating the action of the
    // change.
    Sensitive bool
}
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Addr is copied through so
// the output's address survives a round-trip through the plan file.
func (oc *OutputChange) Encode() (*OutputChangeSrc, error) {
	cs, err := oc.Change.Encode(cty.DynamicPseudoType)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here, so return an explicit nil rather than
	// re-returning the checked error variable.
	return &OutputChangeSrc{
		Addr:      oc.Addr,
		ChangeSrc: *cs,
		Sensitive: oc.Sensitive,
	}, nil
}
// Change describes a single change with a given action.
type Change struct {
    // Action defines what kind of change is being made.
    Action Action

    // Interpretation of Before and After depend on Action:
    //
    //     NoOp     Before and After are the same, unchanged value
    //     Create   Before is nil, and After is the expected value after create.
    //     Read     Before is any prior value (nil if no prior), and After is the
    //              value that was or will be read.
    //     Update   Before is the value prior to update, and After is the expected
    //              value after update.
    //     Replace  As with Update.
    //     Delete   Before is the value prior to delete, and After is always nil.
    //
    // Unknown values may appear anywhere within the Before and After values,
    // either as the values themselves or as nested elements within known
    // collections/structures.
    Before, After cty.Value
}
// Encode produces a variant of the receiver that has its change values
// serialized so it can be written to a plan file. Pass the type constraint
// that the values are expected to conform to; to properly decode the values
// later an identical type constraint must be provided at that time.
//
// Where a Change is embedded in some other struct, it's generally better
// to call the corresponding Encode method of that struct rather than working
// directly with its embedded Change.
func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) {
    // Serialize both values against the caller-supplied type constraint.
    beforeDV, err := NewDynamicValue(c.Before, ty)
    if err != nil {
        return nil, err
    }
    afterDV, err := NewDynamicValue(c.After, ty)
    if err != nil {
        return nil, err
    }

    return &ChangeSrc{
        Action: c.Action,
        Before: beforeDV,
        After:  afterDV,
    }, nil
}
|
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"time"
"google.golang.org/grpc"
"github.com/golang/protobuf/proto"
pb "github.com/wallaceicy06/muni-sign/proto"
)
// configFile is the path to the text-format Configuration proto that lists
// the transit agency and stop IDs to display.
// NOTE(review): hard-coded, user-specific path — consider making this a flag.
const configFile = "/Users/sean/muni_sign_config.pb.txt"

// Addresses of the two backend gRPC servers, overridable on the command line.
var displayAddr = flag.String("display_addr", "raspberrypi.local:50051", "The display server address in the format of host:port")
var nextbusAddr = flag.String("nextbus_addr", "localhost:8080", "The nextbus server address in the format of host:port")

// colors is the palette cycled through per stop: pure red, green, then blue.
var colors = []*pb.Color{
    {
        Red:   1.0,
        Green: 0.0,
        Blue:  0.0,
    },
    {
        Red:   0.0,
        Green: 1.0,
        Blue:  0.0,
    },
    {
        Red:   0.0,
        Green: 0.0,
        Blue:  1.0,
    },
}
// main connects to the display and nextbus gRPC servers, then loops forever:
// it re-reads the configuration file, fetches arrival predictions for each
// configured stop, and writes each prediction to the display for 5 seconds.
func main() {
	// Parse command-line flags before dereferencing them. Without this call
	// displayAddr/nextbusAddr would silently keep their default values no
	// matter what the user passed on the command line.
	flag.Parse()

	dspConn, err := grpc.Dial(*displayAddr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Error connecting to display server: %v", err)
	}
	dspClient := pb.NewDisplayDriverClient(dspConn)

	nbConn, err := grpc.Dial(*nextbusAddr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Error connecting to nextbus server: %v", err)
	}
	nbClient := pb.NewNextbusClient(nbConn)

	for {
		// Re-read the config on every cycle so edits take effect without a restart.
		config, err := readConfigFile()
		if err != nil {
			log.Fatalf("Error reading configuration file: %v", err)
		}
		for i, stopId := range config.GetStopIds() {
			res, err := nbClient.ListPredictions(context.Background(), &pb.ListPredictionsRequest{
				Agency: config.GetAgency(),
				StopId: stopId,
			})
			if err != nil {
				log.Fatalf("Error listing predictions: %v", err)
			}
			for _, pred := range res.GetPredictions() {
				// Show the next one or two arrival times; skip routes with none.
				var msg string
				if l := len(pred.GetNextArrivals()); l == 1 {
					msg = fmt.Sprintf("%s-%s\n%d mins", pred.GetRoute(), pred.GetDestination(), pred.GetNextArrivals()[0])
				} else if l >= 2 {
					msg = fmt.Sprintf("%s-%s\n%d & %d mins", pred.GetRoute(), pred.GetDestination(), pred.GetNextArrivals()[0], pred.GetNextArrivals()[1])
				} else {
					continue
				}
				// Each stop gets a fixed color from the palette, keyed by its index.
				req := &pb.WriteRequest{
					Message: msg,
					Color:   colors[i%len(colors)],
				}
				if _, err := dspClient.Write(context.Background(), req); err != nil {
					log.Fatalf("Error writing: %v", err)
				}
				time.Sleep(time.Second * 5)
			}
		}
	}
}
// readConfigFile loads the sign configuration from the fixed configFile path
// and parses it as a text-format Configuration proto.
func readConfigFile() (*pb.Configuration, error) {
    data, err := ioutil.ReadFile(configFile)
    if err != nil {
        return nil, fmt.Errorf("error reading file: %v", err)
    }
    parsedConfig := &pb.Configuration{}
    if err := proto.UnmarshalText(string(data), parsedConfig); err != nil {
        return nil, fmt.Errorf("error unmarshalling config proto: %v", err)
    }
    return parsedConfig, nil
}
Forgot to parse flags in the driver.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"time"
"google.golang.org/grpc"
"github.com/golang/protobuf/proto"
pb "github.com/wallaceicy06/muni-sign/proto"
)
// configFile is the path to the text-format Configuration proto that lists
// the transit agency and stop IDs to display.
// NOTE(review): hard-coded, user-specific path — consider making this a flag.
const configFile = "/Users/sean/muni_sign_config.pb.txt"

// Addresses of the two backend gRPC servers, overridable on the command line.
var displayAddr = flag.String("display_addr", "raspberrypi.local:50051", "The display server address in the format of host:port")
var nextbusAddr = flag.String("nextbus_addr", "localhost:8080", "The nextbus server address in the format of host:port")

// colors is the palette cycled through per stop: pure red, green, then blue.
var colors = []*pb.Color{
    {
        Red:   1.0,
        Green: 0.0,
        Blue:  0.0,
    },
    {
        Red:   0.0,
        Green: 1.0,
        Blue:  0.0,
    },
    {
        Red:   0.0,
        Green: 0.0,
        Blue:  1.0,
    },
}
// main connects to the display and nextbus gRPC servers, then loops forever:
// it re-reads the configuration file, fetches arrival predictions for each
// configured stop, and writes each prediction to the display for 5 seconds.
func main() {
    flag.Parse()

    dspConn, err := grpc.Dial(*displayAddr, grpc.WithInsecure())
    if err != nil {
        log.Fatalf("Error connecting to display server: %v", err)
    }
    dspClient := pb.NewDisplayDriverClient(dspConn)

    nbConn, err := grpc.Dial(*nextbusAddr, grpc.WithInsecure())
    if err != nil {
        log.Fatalf("Error connecting to nextbus server: %v", err)
    }
    nbClient := pb.NewNextbusClient(nbConn)

    for {
        // Re-read the config on every cycle so edits take effect without a restart.
        config, err := readConfigFile()
        if err != nil {
            log.Fatalf("Error reading configuration file: %v", err)
        }
        for i, stopId := range config.GetStopIds() {
            res, err := nbClient.ListPredictions(context.Background(), &pb.ListPredictionsRequest{
                Agency: config.GetAgency(),
                StopId: stopId,
            })
            if err != nil {
                log.Fatalf("Error listing predictions: %v", err)
            }
            for _, pred := range res.GetPredictions() {
                // Show the next one or two arrival times; skip routes with none.
                var msg string
                if l := len(pred.GetNextArrivals()); l == 1 {
                    msg = fmt.Sprintf("%s-%s\n%d mins", pred.GetRoute(), pred.GetDestination(), pred.GetNextArrivals()[0])
                } else if l >= 2 {
                    msg = fmt.Sprintf("%s-%s\n%d & %d mins", pred.GetRoute(), pred.GetDestination(), pred.GetNextArrivals()[0], pred.GetNextArrivals()[1])
                } else {
                    continue
                }
                // Each stop gets a fixed color from the palette, keyed by its index.
                req := &pb.WriteRequest{
                    Message: msg,
                    Color:   colors[i%len(colors)],
                }
                if _, err := dspClient.Write(context.Background(), req); err != nil {
                    log.Fatalf("Error writing: %v", err)
                }
                time.Sleep(time.Second * 5)
            }
        }
    }
}
// readConfigFile loads the sign configuration from the fixed configFile path
// and parses it as a text-format Configuration proto.
func readConfigFile() (*pb.Configuration, error) {
	raw, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("error reading file: %v", err)
	}
	cfg := new(pb.Configuration)
	if err := proto.UnmarshalText(string(raw), cfg); err != nil {
		return nil, fmt.Errorf("error unmarshalling config proto: %v", err)
	}
	return cfg, nil
}
|
package io
import (
mg "mingle"
mgRct "mingle/reactor"
// "fmt"
// "bytes"
"io"
// "bitgirder/objpath"
// "time"
// "log"
// bgio "bitgirder/io"
)
// BinWriter wraps the core mingle binary writer, adding reactor-based
// value serialization on top of it.
type BinWriter struct { *mg.BinWriter }

// NewWriter returns a BinWriter that serializes to w.
func NewWriter( w io.Writer ) *BinWriter {
    return &BinWriter{ mg.NewWriter( w ) }
}
// writeReactor adapts a *BinWriter into a reactor event processor: each
// reactor event is encoded as a type code followed by its payload.
type writeReactor struct { *BinWriter }

// startStruct writes a struct header: type code, a -1 size placeholder
// (the final size is not known at this point), and the qualified type name.
func ( w writeReactor ) startStruct( qn *mg.QualifiedTypeName ) error {
    if err := w.WriteTypeCode( mg.IoTypeCodeStruct ); err != nil { return err }
    if err := w.WriteInt32( int32( -1 ) ); err != nil { return err }
    return w.WriteQualifiedTypeName( qn )
}

// startField writes a field marker followed by the field identifier.
func ( w writeReactor ) startField( fld *mg.Identifier ) error {
    if err := w.WriteTypeCode( mg.IoTypeCodeField ); err != nil { return err }
    return w.WriteIdentifier( fld )
}

// startList writes a list header: type code, pointer id, element type
// reference, and a -1 size placeholder.
func ( w writeReactor ) startList( lse *mgRct.ListStartEvent ) error {
    if err := w.WriteTypeCode( mg.IoTypeCodeList ); err != nil { return err }
    if err := w.WritePointerId( lse.Id ); err != nil { return err }
    if err := w.WriteListTypeReference( lse.Type ); err != nil { return err }
    return w.WriteInt32( -1 )
}

// startMap writes a symbol-map header: type code followed by the pointer id.
func ( w writeReactor ) startMap( id mg.PointerId ) error {
    if err := w.WriteTypeCode( mg.IoTypeCodeSymMap ); err != nil { return err }
    return w.WritePointerId( id )
}

// value writes a scalar value using the underlying writer's encoding.
func ( w writeReactor ) value( val mg.Value ) error {
    return w.WriteScalarValue( val )
}
// writeValuePointerAlloc writes a value-pointer allocation: type code,
// the allocated value's type reference, and its pointer id.
func ( w writeReactor ) writeValuePointerAlloc(
    vp *mgRct.ValueAllocationEvent ) error {

    if err := w.WriteTypeCode( mg.IoTypeCodeValPtrAlloc ); err != nil {
        return err
    }
    if err := w.WriteTypeReference( vp.Type ); err != nil { return err }
    return w.WritePointerId( vp.Id )
}

// writeValuePointerReference writes a reference to a previously allocated
// value pointer: type code followed by the pointer id.
func ( w writeReactor ) writeValuePointerReference(
    v *mgRct.ValueReferenceEvent ) error {

    if err := w.WriteTypeCode( mg.IoTypeCodeValPtrRef ); err != nil {
        return err
    }
    return w.WritePointerId( v.Id )
}
// ProcessEvent dispatches a reactor event to the matching encoder method.
// An unrecognized event type is a programming error and panics.
func ( w writeReactor ) ProcessEvent( ev mgRct.ReactorEvent ) error {
    switch v := ev.( type ) {
    case *mgRct.ValueEvent: return w.value( v.Val )
    case *mgRct.MapStartEvent: return w.startMap( v.Id )
    case *mgRct.StructStartEvent: return w.startStruct( v.Type )
    case *mgRct.ListStartEvent: return w.startList( v )
    case *mgRct.FieldStartEvent: return w.startField( v.Field )
    case *mgRct.EndEvent: return w.WriteTypeCode( mg.IoTypeCodeEnd )
    case *mgRct.ValueAllocationEvent: return w.writeValuePointerAlloc( v )
    case *mgRct.ValueReferenceEvent: return w.writeValuePointerReference( v )
    }
    panic( libErrorf( "unhandled event type: %T", ev ) )
}

// AsReactor exposes the writer as a reactor event processor.
func ( w *BinWriter ) AsReactor() mgRct.ReactorEventProcessor {
    return writeReactor{ w }
}

// WriteValue serializes val by visiting it and feeding the resulting
// events through the write reactor.
func ( w *BinWriter ) WriteValue( val mg.Value ) ( err error ) {
    return mgRct.VisitValue( val, w.AsReactor() )
}
// BinReader wraps the core mingle binary reader, adding reactor-based
// value deserialization on top of it.
type BinReader struct {
    *mg.BinReader
}

// NewReader returns a BinReader that deserializes from r.
func NewReader( r io.Reader ) *BinReader {
    return &BinReader{ mg.NewReader( r ) }
}

// readScalarValue reads the scalar payload for the already-consumed type
// code tc and emits it as a value event.
func ( r *BinReader ) readScalarValue(
    tc mg.IoTypeCode, rep mgRct.ReactorEventProcessor ) error {

    val, err := r.ReadScalarValue( tc )
    if err != nil { return err }
    return rep.ProcessEvent( mgRct.NewValueEvent( val ) )
}
// readValuePointerAlloc reads a value-pointer allocation (type reference
// then pointer id), emits the allocation event, and then reads the
// allocated value itself.
func ( r *BinReader ) readValuePointerAlloc(
    rep mgRct.ReactorEventProcessor ) error {

    if typ, err := r.ReadTypeReference(); err == nil {
        if id, err := r.ReadPointerId(); err == nil {
            ev := mgRct.NewValueAllocationEvent( typ, id )
            if err := rep.ProcessEvent( ev ); err != nil { return err }
        } else { return err }
    } else { return err }
    return r.implReadValue( rep )
}

// readValuePointerReference reads a pointer id and emits it as a value
// reference event.
func ( r *BinReader ) readValuePointerReference(
    rep mgRct.ReactorEventProcessor ) error {

    id, err := r.ReadPointerId()
    if err != nil { return err }
    return rep.ProcessEvent( mgRct.NewValueReferenceEvent( id ) )
}
func ( r *BinReader ) readMapFields( rep mgRct.ReactorEventProcessor ) error {
for {
tc, err := r.ReadTypeCode()
if err != nil { return err }
switch tc {
case mg.IoTypeCodeEnd: return rep.ProcessEvent( mgRct.NewEndEvent() )
case mg.IoTypeCodeField:
id, err := r.ReadIdentifier()
if err == nil {
err = rep.ProcessEvent( mgRct.NewFieldStartEvent( id ) )
}
if err != nil { return err }
if err := r.implReadValue( rep ); err != nil { return err }
default: return r.IoErrorf( "Unexpected map pair code: 0x%02x", tc )
}
}
panic( libErrorf( "unreachable" ) )
}
// readSymbolMap reads a symbol map: a pointer id (emitted as a
// MapStartEvent) followed by the map's fields. Flattened to early returns
// from the original else-after-return shape.
func (r *BinReader) readSymbolMap(rep mgRct.ReactorEventProcessor) error {
    id, err := r.ReadPointerId()
    if err != nil {
        return err
    }
    if err := rep.ProcessEvent(mgRct.NewMapStartEvent(id)); err != nil {
        return err
    }
    return r.readMapFields(rep)
}
// readStruct reads a struct: a leading int32 size (the writer emits -1; it
// is discarded here), a qualified type name emitted as a StructStartEvent,
// and then the struct's fields. Flattened to early returns.
func (r *BinReader) readStruct(rep mgRct.ReactorEventProcessor) error {
    if _, err := r.ReadInt32(); err != nil {
        return err
    }
    qn, err := r.ReadQualifiedTypeName()
    if err != nil {
        return err
    }
    if err := rep.ProcessEvent(mgRct.NewStructStartEvent(qn)); err != nil {
        return err
    }
    return r.readMapFields(rep)
}
// readListHeader reads a list's pointer id and list type reference and
// emits the corresponding ListStartEvent. Flattened to early returns from
// the original nested if/else shape.
func (r *BinReader) readListHeader(rep mgRct.ReactorEventProcessor) error {
    id, err := r.ReadPointerId()
    if err != nil {
        return err
    }
    typ, err := r.ReadListTypeReference()
    if err != nil {
        return err
    }
    return rep.ProcessEvent(mgRct.NewListStartEvent(typ, id))
}
// readListValues reads list elements until the end code is seen, emitting an
// EndEvent when it is. The leading int32 size (written as -1) is discarded.
// The unreachable panic after the terminating loop (go vet) and the
// else-after-return have been removed.
func (r *BinReader) readListValues(rep mgRct.ReactorEventProcessor) error {
    if _, err := r.ReadInt32(); err != nil { // skip size
        return err
    }
    for {
        tc, err := r.PeekTypeCode()
        if err != nil {
            return err
        }
        if tc == mg.IoTypeCodeEnd {
            if _, err := r.ReadTypeCode(); err != nil {
                return err
            }
            return rep.ProcessEvent(mgRct.NewEndEvent())
        }
        if err := r.implReadValue(rep); err != nil {
            return err
        }
    }
}
// readList reads a complete list value: its header followed by its elements.
func (r *BinReader) readList(rep mgRct.ReactorEventProcessor) error {
    err := r.readListHeader(rep)
    if err == nil {
        err = r.readListValues(rep)
    }
    return err
}
// implReadValue reads the next type code and dispatches to the appropriate
// reader, forwarding the resulting reactor events to rep. Scalar codes go
// through readScalarValue; unknown codes produce an io error. The panic
// after the switch was unreachable (the switch has a default and every case
// returns) and has been removed.
func (r *BinReader) implReadValue(rep mgRct.ReactorEventProcessor) error {
    tc, err := r.ReadTypeCode()
    if err != nil {
        return err
    }
    switch tc {
    case mg.IoTypeCodeNull, mg.IoTypeCodeString, mg.IoTypeCodeBuffer,
        mg.IoTypeCodeTimestamp, mg.IoTypeCodeInt32, mg.IoTypeCodeInt64,
        mg.IoTypeCodeUint32, mg.IoTypeCodeUint64, mg.IoTypeCodeFloat32,
        mg.IoTypeCodeFloat64, mg.IoTypeCodeBool, mg.IoTypeCodeEnum:
        return r.readScalarValue(tc, rep)
    case mg.IoTypeCodeValPtrAlloc:
        return r.readValuePointerAlloc(rep)
    case mg.IoTypeCodeValPtrRef:
        return r.readValuePointerReference(rep)
    case mg.IoTypeCodeSymMap:
        return r.readSymbolMap(rep)
    case mg.IoTypeCodeStruct:
        return r.readStruct(rep)
    case mg.IoTypeCodeList:
        return r.readList(rep)
    default:
        return r.IoErrorf("unrecognized value code: 0x%02x", tc)
    }
}
// ReadReactorValue reads the next value from the stream, feeding the
// resulting events to rep rather than materializing a value.
func ( r *BinReader ) ReadReactorValue(
rep mgRct.ReactorEventProcessor ) error {
return r.implReadValue( rep )
}
// ReadValue reads the next value from the stream and materializes it by
// running the decoded events through a value-builder reactor pipeline.
func (r *BinReader) ReadValue() (mg.Value, error) {
    bld := mgRct.NewValueBuilder()
    if err := r.ReadReactorValue(mgRct.InitReactorPipeline(bld)); err != nil {
        return nil, err
    }
    return bld.GetValue(), nil
}
Remove some commented-out code.
package io
import (
mg "mingle"
mgRct "mingle/reactor"
"io"
)
// BinWriter encodes mingle values to a binary stream, layering reactor event
// consumption on top of the low-level mg.BinWriter primitives.
type BinWriter struct { *mg.BinWriter }
// NewWriter returns a BinWriter that encodes values to w.
func NewWriter( w io.Writer ) *BinWriter {
return &BinWriter{ mg.NewWriter( w ) }
}
// writeReactor adapts a BinWriter to the reactor event processor interface.
type writeReactor struct { *BinWriter }
// startStruct writes the header of a struct value: the struct type code, a
// placeholder size of -1 (readStruct discards this field), and the struct's
// qualified type name.
func ( w writeReactor ) startStruct( qn *mg.QualifiedTypeName ) error {
if err := w.WriteTypeCode( mg.IoTypeCodeStruct ); err != nil { return err }
if err := w.WriteInt32( int32( -1 ) ); err != nil { return err }
return w.WriteQualifiedTypeName( qn )
}
// startField writes a field header: the field type code followed by the
// field's identifier.
func (w writeReactor) startField(fld *mg.Identifier) error {
    err := w.WriteTypeCode(mg.IoTypeCodeField)
    if err == nil {
        err = w.WriteIdentifier(fld)
    }
    return err
}
// startList writes the header of a list value: the list type code, the
// list's pointer id and element type reference, and a placeholder size of
// -1 (readListValues discards the size field).
func ( w writeReactor ) startList( lse *mgRct.ListStartEvent ) error {
if err := w.WriteTypeCode( mg.IoTypeCodeList ); err != nil { return err }
if err := w.WritePointerId( lse.Id ); err != nil { return err }
if err := w.WriteListTypeReference( lse.Type ); err != nil { return err }
return w.WriteInt32( -1 )
}
// startMap writes the header of a symbol map: the map type code followed by
// the map's pointer id.
func (w writeReactor) startMap(id mg.PointerId) error {
    err := w.WriteTypeCode(mg.IoTypeCodeSymMap)
    if err == nil {
        err = w.WritePointerId(id)
    }
    return err
}
// value forwards a scalar value to the underlying WriteScalarValue.
func ( w writeReactor ) value( val mg.Value ) error {
return w.WriteScalarValue( val )
}
// writeValuePointerAlloc writes a value-pointer allocation: its type code,
// the allocated type reference, and the pointer id.
func (w writeReactor) writeValuePointerAlloc(
    vp *mgRct.ValueAllocationEvent) error {

    err := w.WriteTypeCode(mg.IoTypeCodeValPtrAlloc)
    if err == nil {
        err = w.WriteTypeReference(vp.Type)
    }
    if err == nil {
        err = w.WritePointerId(vp.Id)
    }
    return err
}
// writeValuePointerReference writes a reference to a previously allocated
// value pointer: its type code followed by the pointer id.
func (w writeReactor) writeValuePointerReference(
    v *mgRct.ValueReferenceEvent) error {

    err := w.WriteTypeCode(mg.IoTypeCodeValPtrRef)
    if err == nil {
        err = w.WritePointerId(v.Id)
    }
    return err
}
// ProcessEvent implements mgRct.ReactorEventProcessor by dispatching on the
// concrete event type and serializing the event to the underlying stream.
// An unrecognized event type is a programming error and panics.
func ( w writeReactor ) ProcessEvent( ev mgRct.ReactorEvent ) error {
switch v := ev.( type ) {
case *mgRct.ValueEvent: return w.value( v.Val )
case *mgRct.MapStartEvent: return w.startMap( v.Id )
case *mgRct.StructStartEvent: return w.startStruct( v.Type )
case *mgRct.ListStartEvent: return w.startList( v )
case *mgRct.FieldStartEvent: return w.startField( v.Field )
case *mgRct.EndEvent: return w.WriteTypeCode( mg.IoTypeCodeEnd )
case *mgRct.ValueAllocationEvent: return w.writeValuePointerAlloc( v )
case *mgRct.ValueReferenceEvent: return w.writeValuePointerReference( v )
}
panic( libErrorf( "unhandled event type: %T", ev ) )
}
// AsReactor exposes this writer as a reactor event processor; events fed to
// the returned processor are serialized to the underlying binary stream.
func (w *BinWriter) AsReactor() mgRct.ReactorEventProcessor {
    return writeReactor{BinWriter: w}
}
// WriteValue serializes val to the writer's underlying stream by visiting it
// with the binary write reactor. (The named result of the original served no
// purpose and has been dropped; the signature is otherwise unchanged.)
func (w *BinWriter) WriteValue(val mg.Value) error {
    return mgRct.VisitValue(val, w.AsReactor())
}
// BinReader decodes mingle values from a binary stream, layering reactor
// event production on top of the low-level mg.BinReader primitives.
type BinReader struct {
*mg.BinReader
}
// NewReader returns a BinReader that decodes values from r.
func NewReader( r io.Reader ) *BinReader {
return &BinReader{ mg.NewReader( r ) }
}
// readScalarValue reads the scalar payload for type code tc and forwards it
// to rep as a single ValueEvent.
func ( r *BinReader ) readScalarValue(
tc mg.IoTypeCode, rep mgRct.ReactorEventProcessor ) error {
val, err := r.ReadScalarValue( tc )
if err != nil { return err }
return rep.ProcessEvent( mgRct.NewValueEvent( val ) )
}
// readValuePointerAlloc reads a value-pointer allocation: a type reference
// and a pointer id, forwarded to rep as a ValueAllocationEvent, followed by
// the allocated value itself. Rewritten with early returns; the original's
// nested if/else chains shadowed err and buried the happy path.
func (r *BinReader) readValuePointerAlloc(
    rep mgRct.ReactorEventProcessor) error {

    typ, err := r.ReadTypeReference()
    if err != nil {
        return err
    }
    id, err := r.ReadPointerId()
    if err != nil {
        return err
    }
    if err := rep.ProcessEvent(mgRct.NewValueAllocationEvent(typ, id)); err != nil {
        return err
    }
    return r.implReadValue(rep)
}
// readValuePointerReference reads a pointer id and forwards it to rep as a
// ValueReferenceEvent.
func (r *BinReader) readValuePointerReference(
    rep mgRct.ReactorEventProcessor) error {

    id, err := r.ReadPointerId()
    if err != nil {
        return err
    }
    ev := mgRct.NewValueReferenceEvent(id)
    return rep.ProcessEvent(ev)
}
// readMapFields reads (field, value) pairs until an end code is seen,
// forwarding a FieldStartEvent plus the field's value events for each pair
// and an EndEvent at the end. Any other type code is an io error. The
// unreachable panic that followed the terminating for-loop (flagged by
// go vet) has been removed.
func (r *BinReader) readMapFields(rep mgRct.ReactorEventProcessor) error {
    for {
        tc, err := r.ReadTypeCode()
        if err != nil {
            return err
        }
        switch tc {
        case mg.IoTypeCodeEnd:
            return rep.ProcessEvent(mgRct.NewEndEvent())
        case mg.IoTypeCodeField:
            id, err := r.ReadIdentifier()
            if err != nil {
                return err
            }
            if err := rep.ProcessEvent(mgRct.NewFieldStartEvent(id)); err != nil {
                return err
            }
            if err := r.implReadValue(rep); err != nil {
                return err
            }
        default:
            return r.IoErrorf("Unexpected map pair code: 0x%02x", tc)
        }
    }
}
// readSymbolMap reads a symbol map: a pointer id (emitted as a
// MapStartEvent) followed by the map's fields. Flattened to early returns
// from the original else-after-return shape.
func (r *BinReader) readSymbolMap(rep mgRct.ReactorEventProcessor) error {
    id, err := r.ReadPointerId()
    if err != nil {
        return err
    }
    if err := rep.ProcessEvent(mgRct.NewMapStartEvent(id)); err != nil {
        return err
    }
    return r.readMapFields(rep)
}
// readStruct reads a struct: a leading int32 size (the writer emits -1; it
// is discarded here), a qualified type name emitted as a StructStartEvent,
// and then the struct's fields. Flattened to early returns.
func (r *BinReader) readStruct(rep mgRct.ReactorEventProcessor) error {
    if _, err := r.ReadInt32(); err != nil {
        return err
    }
    qn, err := r.ReadQualifiedTypeName()
    if err != nil {
        return err
    }
    if err := rep.ProcessEvent(mgRct.NewStructStartEvent(qn)); err != nil {
        return err
    }
    return r.readMapFields(rep)
}
// readListHeader reads a list's pointer id and list type reference and
// emits the corresponding ListStartEvent. Flattened to early returns from
// the original nested if/else shape.
func (r *BinReader) readListHeader(rep mgRct.ReactorEventProcessor) error {
    id, err := r.ReadPointerId()
    if err != nil {
        return err
    }
    typ, err := r.ReadListTypeReference()
    if err != nil {
        return err
    }
    return rep.ProcessEvent(mgRct.NewListStartEvent(typ, id))
}
// readListValues reads list elements until the end code is seen, emitting an
// EndEvent when it is. The leading int32 size (written as -1) is discarded.
// The unreachable panic after the terminating loop (go vet) and the
// else-after-return have been removed.
func (r *BinReader) readListValues(rep mgRct.ReactorEventProcessor) error {
    if _, err := r.ReadInt32(); err != nil { // skip size
        return err
    }
    for {
        tc, err := r.PeekTypeCode()
        if err != nil {
            return err
        }
        if tc == mg.IoTypeCodeEnd {
            if _, err := r.ReadTypeCode(); err != nil {
                return err
            }
            return rep.ProcessEvent(mgRct.NewEndEvent())
        }
        if err := r.implReadValue(rep); err != nil {
            return err
        }
    }
}
// readList reads a complete list value: its header followed by its elements.
func (r *BinReader) readList(rep mgRct.ReactorEventProcessor) error {
    err := r.readListHeader(rep)
    if err == nil {
        err = r.readListValues(rep)
    }
    return err
}
// implReadValue reads the next type code and dispatches to the appropriate
// reader, forwarding the resulting reactor events to rep. Scalar codes go
// through readScalarValue; unknown codes produce an io error. The panic
// after the switch was unreachable (the switch has a default and every case
// returns) and has been removed.
func (r *BinReader) implReadValue(rep mgRct.ReactorEventProcessor) error {
    tc, err := r.ReadTypeCode()
    if err != nil {
        return err
    }
    switch tc {
    case mg.IoTypeCodeNull, mg.IoTypeCodeString, mg.IoTypeCodeBuffer,
        mg.IoTypeCodeTimestamp, mg.IoTypeCodeInt32, mg.IoTypeCodeInt64,
        mg.IoTypeCodeUint32, mg.IoTypeCodeUint64, mg.IoTypeCodeFloat32,
        mg.IoTypeCodeFloat64, mg.IoTypeCodeBool, mg.IoTypeCodeEnum:
        return r.readScalarValue(tc, rep)
    case mg.IoTypeCodeValPtrAlloc:
        return r.readValuePointerAlloc(rep)
    case mg.IoTypeCodeValPtrRef:
        return r.readValuePointerReference(rep)
    case mg.IoTypeCodeSymMap:
        return r.readSymbolMap(rep)
    case mg.IoTypeCodeStruct:
        return r.readStruct(rep)
    case mg.IoTypeCodeList:
        return r.readList(rep)
    default:
        return r.IoErrorf("unrecognized value code: 0x%02x", tc)
    }
}
// ReadReactorValue reads the next value from the stream, feeding the
// resulting events to rep rather than materializing a value.
func ( r *BinReader ) ReadReactorValue(
rep mgRct.ReactorEventProcessor ) error {
return r.implReadValue( rep )
}
// ReadValue reads the next value from the stream and materializes it by
// running the decoded events through a value-builder reactor pipeline.
func (r *BinReader) ReadValue() (mg.Value, error) {
    bld := mgRct.NewValueBuilder()
    if err := r.ReadReactorValue(mgRct.InitReactorPipeline(bld)); err != nil {
        return nil, err
    }
    return bld.GetValue(), nil
}
|
package vault
import (
"encoding/json"
"errors"
"io/ioutil"
"os"
"github.com/hashicorp/vault/api"
"github.com/MiLk/nsscache-go/source"
)
// wrappedData mirrors the JSON document produced by Vault response wrapping.
// Only Token is consumed by CreateVaultClient; the remaining fields are
// retained so the full wrapping payload round-trips.
type wrappedData struct {
Token string `json:"token"`
Accessor string `json:"accessor"`
TTL int `json:"ttl"`
CreationTime string `json:"creation_time"`
CreationPath string `json:"creation_path"`
WrappedAccessor string `json:"wrapped_accessor"`
}
// CreateVaultSource returns a vault source with a client associated to work with.
//
// `fpath` is the token-file path forwarded to CreateVaultClient; `prefix` is
// passed to the source via the Prefix option (NOTE(review): presumably a
// Vault secret path prefix — confirm against NewSource).
func CreateVaultSource(prefix string, fpath string) (source.Source, error) {
client, err := CreateVaultClient(fpath)
if err != nil {
return nil, err
}
return NewSource(Client(client), Prefix(prefix))
}
// CreateVaultClient returns a Vault Client with a valid Token provided by the Vault Agent assigned to it.
//
// `fpath` indicates the path of the file to read from. This file is where the token provided by the agent is stored.
//
// Error strings are lowercased per Go convention (they are usually wrapped or
// chained by callers), and the wrapped-token branch is flattened with early
// returns.
func CreateVaultClient(fpath string) (*api.Client, error) {
	client, err := api.NewClient(nil)
	if err != nil {
		return nil, err
	}
	rawToken, err := ReadToken(fpath)
	if err != nil {
		return nil, err
	}
	if len(rawToken) == 0 {
		return nil, errors.New("token file is empty")
	}
	var token string
	// Check if the token has been stored in JSON format (wrapped token) or as
	// a plain string.
	if rawToken[0] == '{' {
		var wrapped wrappedData
		if err := json.Unmarshal(rawToken, &wrapped); err != nil {
			return nil, err
		}
		unwrapToken := wrapped.Token
		if unwrapToken == "" {
			return nil, errors.New("unwrap token is empty")
		}
		secret, err := client.Logical().Unwrap(unwrapToken)
		if err != nil {
			return nil, err
		}
		if secret == nil {
			return nil, errors.New("could not find wrapped response")
		}
		dataToken, ok := secret.Data["token"].(string)
		if !ok {
			return nil, errors.New("key `token` was not found on the unwrapped data")
		}
		token = dataToken
	} else {
		token = string(rawToken)
	}
	if token == "" {
		return nil, errors.New("unable to fetch token from file")
	}
	client.SetToken(token)
	return client, nil
}
// ReadToken returns a byte array containing data from the designated file.
//
// `fpath` indicates the path where the file is located at.
func ReadToken(fpath string) ([]byte, error) {
tokenFile, err := os.Open(fpath)
if err != nil {
return nil, err
}
defer tokenFile.Close()
rawToken, err := ioutil.ReadAll(tokenFile)
if err != nil {
return nil, err
}
return rawToken, nil
}
source/vault: idiomatic error formatting
package vault
import (
"encoding/json"
"errors"
"io/ioutil"
"os"
"github.com/hashicorp/vault/api"
"github.com/MiLk/nsscache-go/source"
)
// wrappedData mirrors the JSON document produced by Vault response wrapping.
// Only Token is consumed by CreateVaultClient; the remaining fields are
// retained so the full wrapping payload round-trips.
type wrappedData struct {
Token string `json:"token"`
Accessor string `json:"accessor"`
TTL int `json:"ttl"`
CreationTime string `json:"creation_time"`
CreationPath string `json:"creation_path"`
WrappedAccessor string `json:"wrapped_accessor"`
}
// CreateVaultSource returns a vault source with a client associated to work with.
//
// `fpath` is the token-file path forwarded to CreateVaultClient; `prefix` is
// passed to the source via the Prefix option (NOTE(review): presumably a
// Vault secret path prefix — confirm against NewSource).
func CreateVaultSource(prefix string, fpath string) (source.Source, error) {
client, err := CreateVaultClient(fpath)
if err != nil {
return nil, err
}
return NewSource(Client(client), Prefix(prefix))
}
// CreateVaultClient returns a Vault Client with a valid Token provided by the Vault Agent assigned to it.
//
// `fpath` indicates the path of the file to read from. This file is where the token provided by the agent is stored.
//
// The wrapped-token branch is flattened with early returns instead of the
// original `if err == nil { ... } else { return nil, err }` inversion; all
// error messages are unchanged.
func CreateVaultClient(fpath string) (*api.Client, error) {
	client, err := api.NewClient(nil)
	if err != nil {
		return nil, err
	}
	rawToken, err := ReadToken(fpath)
	if err != nil {
		return nil, err
	}
	if len(rawToken) == 0 {
		return nil, errors.New("token file is empty")
	}
	var token string
	// Check if the token has been stored in JSON format (wrapped token) or as
	// a plain string.
	if rawToken[0] == '{' {
		var wrapped wrappedData
		if err := json.Unmarshal(rawToken, &wrapped); err != nil {
			return nil, err
		}
		unwrapToken := wrapped.Token
		if unwrapToken == "" {
			return nil, errors.New("unwrap token is empty")
		}
		secret, err := client.Logical().Unwrap(unwrapToken)
		if err != nil {
			return nil, err
		}
		if secret == nil {
			return nil, errors.New("could not find wrapped response")
		}
		dataToken, ok := secret.Data["token"].(string)
		if !ok {
			return nil, errors.New("key `token` was not found on the unwrapped data")
		}
		token = dataToken
	} else {
		token = string(rawToken)
	}
	if token == "" {
		return nil, errors.New("unable to fetch token from file")
	}
	client.SetToken(token)
	return client, nil
}
// ReadToken returns a byte array containing data from the designated file.
//
// `fpath` indicates the path where the file is located at.
func ReadToken(fpath string) ([]byte, error) {
tokenFile, err := os.Open(fpath)
if err != nil {
return nil, err
}
defer tokenFile.Close()
rawToken, err := ioutil.ReadAll(tokenFile)
if err != nil {
return nil, err
}
return rawToken, nil
}
|
/*
Copyright 2015-2019 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package auth implements certificate signing authority and access control server
// Authority server is composed of several parts:
//
// * Authority server itself that implements signing and acl logic
// * HTTP server wrapper for authority server
// * HTTP client wrapper
//
package auth
import (
"context"
"crypto"
"crypto/subtle"
"fmt"
"math/rand"
"net/url"
"strings"
"sync"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
kubeutils "github.com/gravitational/teleport/lib/kube/utils"
"github.com/gravitational/teleport/lib/limiter"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/services/local"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/sshca"
"github.com/gravitational/teleport/lib/sshutils"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/lib/wrappers"
"github.com/pborman/uuid"
"github.com/coreos/go-oidc/oauth2"
"github.com/coreos/go-oidc/oidc"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/prometheus/client_golang/prometheus"
saml2 "github.com/russellhaering/gosaml2"
"github.com/tstranex/u2f"
"golang.org/x/crypto/ssh"
)
// ServerOption allows setting options as functional arguments to NewServer.
type ServerOption func(*Server)
// NewServer creates and configures a new Server instance.
//
// Any service left nil in cfg is filled in with its default local (backend-
// backed) implementation; audit log, emitter and streamer default to discard
// implementations. Options are applied after construction and may override
// fields such as the clock.
func NewServer(cfg *InitConfig, opts ...ServerOption) (*Server, error) {
// Default every nil service to its local implementation over cfg.Backend.
if cfg.Trust == nil {
cfg.Trust = local.NewCAService(cfg.Backend)
}
if cfg.Presence == nil {
cfg.Presence = local.NewPresenceService(cfg.Backend)
}
if cfg.Provisioner == nil {
cfg.Provisioner = local.NewProvisioningService(cfg.Backend)
}
if cfg.Identity == nil {
cfg.Identity = local.NewIdentityService(cfg.Backend)
}
if cfg.Access == nil {
cfg.Access = local.NewAccessService(cfg.Backend)
}
if cfg.DynamicAccess == nil {
cfg.DynamicAccess = local.NewDynamicAccessService(cfg.Backend)
}
if cfg.ClusterConfiguration == nil {
cfg.ClusterConfiguration = local.NewClusterConfigurationService(cfg.Backend)
}
if cfg.Events == nil {
cfg.Events = local.NewEventsService(cfg.Backend)
}
if cfg.AuditLog == nil {
cfg.AuditLog = events.NewDiscardAuditLog()
}
if cfg.Emitter == nil {
cfg.Emitter = events.NewDiscardEmitter()
}
if cfg.Streamer == nil {
cfg.Streamer = events.NewDiscardEmitter()
}
// Bound the number of concurrent signature-generation requests.
limiter, err := limiter.NewConnectionsLimiter(limiter.Config{
MaxConnections: defaults.LimiterMaxConcurrentSignatures,
})
if err != nil {
return nil, trace.Wrap(err)
}
// closeCtx/cancelFunc drive shutdown of background loops (see Close and
// runPeriodicOperations).
closeCtx, cancelFunc := context.WithCancel(context.TODO())
as := Server{
bk: cfg.Backend,
limiter: limiter,
Authority: cfg.Authority,
AuthServiceName: cfg.AuthServiceName,
oidcClients: make(map[string]*oidcClient),
samlProviders: make(map[string]*samlProvider),
githubClients: make(map[string]*githubClient),
caSigningAlg: cfg.CASigningAlg,
cancelFunc: cancelFunc,
closeCtx: closeCtx,
emitter: cfg.Emitter,
streamer: cfg.Streamer,
Services: Services{
Trust: cfg.Trust,
Presence: cfg.Presence,
Provisioner: cfg.Provisioner,
Identity: cfg.Identity,
Access: cfg.Access,
DynamicAccess: cfg.DynamicAccess,
ClusterConfiguration: cfg.ClusterConfiguration,
IAuditLog: cfg.AuditLog,
Events: cfg.Events,
},
}
for _, o := range opts {
o(&as)
}
// Fall back to the real clock if no option supplied one.
if as.clock == nil {
as.clock = clockwork.NewRealClock()
}
return &as, nil
}
// Services aggregates the backend services used by the auth server;
// embedding them lets Server (via its embedded Services) satisfy each of
// their interfaces directly.
type Services struct {
services.Trust
services.Presence
services.Provisioner
services.Identity
services.Access
services.DynamicAccess
services.ClusterConfiguration
services.Events
events.IAuditLog
}
// Prometheus collectors tracking load and latency of server key generation.
// NOTE(review): registration of these collectors is not visible in this
// file — presumably done elsewhere; confirm before relying on the metrics
// being exported.
var (
generateRequestsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: teleport.MetricGenerateRequests,
Help: "Number of requests to generate new server keys",
},
)
generateThrottledRequestsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: teleport.MetricGenerateRequestsThrottled,
Help: "Number of throttled requests to generate new server keys",
},
)
generateRequestsCurrent = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: teleport.MetricGenerateRequestsCurrent,
Help: "Number of current generate requests for server keys",
},
)
generateRequestsLatencies = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: teleport.MetricGenerateRequestsHistogram,
Help: "Latency for generate requests for server keys",
// lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
// highest bucket start of 0.001 sec * 2^15 == 32.768 sec
Buckets: prometheus.ExponentialBuckets(0.001, 2, 16),
},
)
)
// Server keeps the cluster together. It acts as a certificate authority (CA) for
// a cluster and:
// - generates the keypair for the node it's running on
// - invites other SSH nodes to a cluster, by issuing invite tokens
// - adds other SSH nodes to a cluster, by checking their token and signing their keys
// - same for users and their sessions
// - checks public keys to see if they're signed by it (can be trusted or not)
type Server struct {
// lock guards the mutable fields below (clock, cache and the SSO client
// maps); see the Get/Set accessors.
lock sync.RWMutex
// oidcClients, samlProviders and githubClients hold per-provider SSO
// client state. NOTE(review): the map key semantics are not visible in
// this file — confirm (presumably connector name).
oidcClients map[string]*oidcClient
samlProviders map[string]*samlProvider
githubClients map[string]*githubClient
// clock abstracts time for tests; see SetClock/GetClock.
clock clockwork.Clock
// bk is the storage backend; closed by Close.
bk backend.Backend
// closeCtx is cancelled by cancelFunc on shutdown, stopping background
// loops such as runPeriodicOperations.
closeCtx context.Context
cancelFunc context.CancelFunc
sshca.Authority
// AuthServiceName is a human-readable name of this CA. If several Auth services are running
// (managing multiple teleport clusters) this field is used to tell them apart in UIs
// It usually defaults to the hostname of the machine the Auth service runs on.
AuthServiceName string
// Services encapsulate services - provisioner, trust, etc
// used by the auth server in a separate structure
Services
// privateKey is used in tests to use pre-generated private keys
privateKey []byte
// cipherSuites is a list of ciphersuites that the auth server supports.
cipherSuites []uint16
// caSigningAlg is an SSH signing algorithm to use when generating new CAs.
caSigningAlg *string
// cache is a fast cache that allows auth server
// to use cache for most frequent operations,
// if not set, cache uses itself
cache Cache
// limiter bounds concurrent signature-generation requests.
limiter *limiter.ConnectionsLimiter
// Emitter is events emitter, used to submit discrete events
emitter events.Emitter
// streamer is events sessionstreamer, used to create continuous
// session related streams
streamer events.Streamer
}
// SetCache sets the cache used by the auth server for read operations.
func (a *Server) SetCache(clt Cache) {
	a.lock.Lock()
	a.cache = clt
	a.lock.Unlock()
}
// GetCache returns the cache used by the auth server, falling back to the
// server's own services when no cache has been set.
func (a *Server) GetCache() Cache {
	a.lock.RLock()
	defer a.lock.RUnlock()
	c := a.cache
	if c == nil {
		return &a.Services
	}
	return c
}
// runPeriodicOperations runs some periodic bookkeeping operations
// performed by auth server. It blocks until closeCtx is cancelled (see
// Close), so it is expected to run on its own goroutine.
func (a *Server) runPeriodicOperations() {
// run periodic functions with a semi-random period
// to avoid contention on the database in case if there are multiple
// auth servers running - so they don't compete trying
// to update the same resources.
r := rand.New(rand.NewSource(a.GetClock().Now().UnixNano()))
period := defaults.HighResPollingPeriod + time.Duration(r.Intn(int(defaults.HighResPollingPeriod/time.Second)))*time.Second
log.Debugf("Ticking with period: %v.", period)
ticker := time.NewTicker(period)
// Stop the ticker on exit so its goroutine/resources are released.
defer ticker.Stop()
for {
select {
case <-a.closeCtx.Done():
return
case <-ticker.C:
err := a.autoRotateCertAuthorities()
if err != nil {
// Concurrent updates by another auth server are expected and benign.
if trace.IsCompareFailed(err) {
log.Debugf("Cert authority has been updated concurrently: %v.", err)
} else {
log.Errorf("Failed to perform cert rotation check: %v.", err)
}
}
}
}
}
// Close cancels the server's background operations and closes the backend,
// if one was configured.
func (a *Server) Close() error {
	a.cancelFunc()
	if a.bk == nil {
		return nil
	}
	return trace.Wrap(a.bk.Close())
}
// GetClock returns the clock used by the auth server.
func (a *Server) GetClock() clockwork.Clock {
	a.lock.RLock()
	clk := a.clock
	a.lock.RUnlock()
	return clk
}
// SetClock sets clock, used in tests.
func (a *Server) SetClock(clock clockwork.Clock) {
	a.lock.Lock()
	a.clock = clock
	a.lock.Unlock()
}
// SetAuditLog sets the server's audit log.
// NOTE(review): unlike SetClock/SetCache this write is not guarded by
// a.lock — confirm it is only called during single-threaded setup.
func (a *Server) SetAuditLog(auditLog events.IAuditLog) {
a.IAuditLog = auditLog
}
// GetClusterConfig gets ClusterConfig from the backend, going through the
// configured cache when one is set.
func (a *Server) GetClusterConfig(opts ...services.MarshalOption) (services.ClusterConfig, error) {
return a.GetCache().GetClusterConfig(opts...)
}
// GetClusterName returns the domain name that identifies this authority server.
// Also known as "cluster name". Reads go through the configured cache when
// one is set.
func (a *Server) GetClusterName(opts ...services.MarshalOption) (services.ClusterName, error) {
return a.GetCache().GetClusterName(opts...)
}
// GetDomainName returns the domain name that identifies this authority
// server. Also known as "cluster name".
func (a *Server) GetDomainName() (string, error) {
	cn, err := a.GetClusterName()
	if err != nil {
		return "", trace.Wrap(err)
	}
	return cn.GetClusterName(), nil
}
// LocalCAResponse contains PEM-encoded local CAs. It is the return type of
// GetClusterCACert and carries no signing keys.
type LocalCAResponse struct {
// TLSCA is the PEM-encoded TLS certificate authority.
TLSCA []byte `json:"tls_ca"`
}
// GetClusterCACert returns the CAs for the local cluster without signing keys.
// The host CA is loaded with loadSigningKeys=false, so only public material
// is handled here.
func (a *Server) GetClusterCACert() (*LocalCAResponse, error) {
clusterName, err := a.GetClusterName()
if err != nil {
return nil, trace.Wrap(err)
}
// Extract the TLS CA for this cluster.
hostCA, err := a.GetCache().GetCertAuthority(services.CertAuthID{
Type: services.HostCA,
DomainName: clusterName.GetClusterName(),
}, false)
if err != nil {
return nil, trace.Wrap(err)
}
tlsCA, err := hostCA.TLSCA()
if err != nil {
return nil, trace.Wrap(err)
}
// Marshal to PEM bytes to send the CA over the wire.
pemBytes, err := tlsca.MarshalCertificatePEM(tlsCA.Cert)
if err != nil {
return nil, trace.Wrap(err)
}
return &LocalCAResponse{
TLSCA: pemBytes,
}, nil
}
// GenerateHostCert uses the private key of the CA to sign the public key of the host
// (along with meta data like host ID, node name, roles, and ttl) to generate a host certificate.
// The host CA is loaded with its signing keys (loadSigningKeys=true); the
// first signing key is used for the signature.
func (a *Server) GenerateHostCert(hostPublicKey []byte, hostID, nodeName string, principals []string, clusterName string, roles teleport.Roles, ttl time.Duration) ([]byte, error) {
domainName, err := a.GetDomainName()
if err != nil {
return nil, trace.Wrap(err)
}
// get the certificate authority that will be signing the public key of the host
ca, err := a.Trust.GetCertAuthority(services.CertAuthID{
Type: services.HostCA,
DomainName: domainName,
}, true)
if err != nil {
return nil, trace.BadParameter("failed to load host CA for '%s': %v", domainName, err)
}
// get the private key of the certificate authority
caPrivateKey, err := ca.FirstSigningKey()
if err != nil {
return nil, trace.Wrap(err)
}
// create and sign!
return a.Authority.GenerateHostCert(services.HostCertParams{
PrivateCASigningKey: caPrivateKey,
CASigningAlg: ca.GetSigningAlg(),
PublicHostKey: hostPublicKey,
HostID: hostID,
NodeName: nodeName,
Principals: principals,
ClusterName: clusterName,
Roles: roles,
TTL: ttl,
})
}
// certs is a pair of SSH and TLS certificates, as produced by
// generateUserCert.
type certs struct {
// ssh is PEM encoded SSH certificate
ssh []byte
// tls is PEM encoded TLS certificate
tls []byte
}
// certRequest collects the inputs used by generateUserCert to issue a
// user's SSH and TLS certificates.
type certRequest struct {
// user is a user to generate certificate for
user services.User
// checker is used to perform RBAC checks.
checker services.AccessChecker
// ttl is Duration of the certificate
ttl time.Duration
// publicKey is RSA public key in authorized_keys format
publicKey []byte
// compatibility is compatibility mode
compatibility string
// overrideRoleTTL is used for requests when the requested TTL should not be
// adjusted based off the role of the user. This is used by tctl to allow
// creating long lived user certs.
overrideRoleTTL bool
// usage is a list of acceptable usages to be encoded in X509 certificate,
// is used to limit ways the certificate can be used, for example
// the cert can be only used against kubernetes endpoint, and not auth endpoint,
// no usage means unrestricted (to keep backwards compatibility)
usage []string
// routeToCluster is an optional teleport cluster name to route the
// certificate requests to, this teleport cluster name will be used to
// route the requests to in case of kubernetes
routeToCluster string
// kubernetesCluster specifies the target kubernetes cluster for TLS
// identities. This can be empty on older Teleport clients.
kubernetesCluster string
// traits hold claim data used to populate a role at runtime.
traits wrappers.Traits
// activeRequests tracks privilege escalation requests applied
// during the construction of the certificate.
activeRequests services.RequestIDs
// appSessionID is the session ID of the application session.
appSessionID string
// appPublicAddr is the public address of the application.
appPublicAddr string
// appClusterName is the name of the cluster this application is in.
appClusterName string
}
// GenerateUserTestCerts is used to generate user certificate, used internally for tests.
// It returns the SSH certificate and the TLS certificate, in that order.
func (a *Server) GenerateUserTestCerts(key []byte, username string, ttl time.Duration, compatibility, routeToCluster string) ([]byte, []byte, error) {
user, err := a.Identity.GetUser(username, false)
if err != nil {
return nil, nil, trace.Wrap(err)
}
// Build the RBAC checker from the user's roles and traits.
checker, err := services.FetchRoles(user.GetRoles(), a.Access, user.GetTraits())
if err != nil {
return nil, nil, trace.Wrap(err)
}
certs, err := a.generateUserCert(certRequest{
user: user,
ttl: ttl,
compatibility: compatibility,
publicKey: key,
routeToCluster: routeToCluster,
checker: checker,
traits: user.GetTraits(),
})
if err != nil {
return nil, nil, trace.Wrap(err)
}
return certs.ssh, certs.tls, nil
}
// GenerateUserAppTestCert generates an application specific certificate, used
// internally for tests. Only the TLS certificate is returned; its usage is
// restricted to applications.
func (a *Server) GenerateUserAppTestCert(publicKey []byte, username string, ttl time.Duration, publicAddr string, clusterName string) ([]byte, error) {
user, err := a.Identity.GetUser(username, false)
if err != nil {
return nil, trace.Wrap(err)
}
// Build the RBAC checker from the user's roles and traits.
checker, err := services.FetchRoles(user.GetRoles(), a.Access, user.GetTraits())
if err != nil {
return nil, trace.Wrap(err)
}
certs, err := a.generateUserCert(certRequest{
user: user,
publicKey: publicKey,
checker: checker,
ttl: ttl,
// Set the login to be a random string. Application certificates are never
// used to log into servers but SSH certificate generation code requires a
// principal be in the certificate.
traits: wrappers.Traits(map[string][]string{
teleport.TraitLogins: []string{uuid.New()},
}),
// Only allow this certificate to be used for applications.
usage: []string{teleport.UsageAppsOnly},
// Add in the application routing information.
appSessionID: uuid.New(),
appPublicAddr: publicAddr,
appClusterName: clusterName,
})
if err != nil {
return nil, trace.Wrap(err)
}
return certs.tls, nil
}
// generateUserCert generates user certificates: an SSH certificate and a TLS
// certificate sharing the same public key, both signed by the cluster's user
// CA. Session TTL and allowed logins are derived from the request's RBAC
// checker unless overrideRoleTTL is set.
func (a *Server) generateUserCert(req certRequest) (*certs, error) {
// reuse the same RSA keys for SSH and TLS keys
cryptoPubKey, err := sshutils.CryptoPublicKey(req.publicKey)
if err != nil {
return nil, trace.Wrap(err)
}
// extract the passed in certificate format. if nothing was passed in, fetch
// the certificate format from the role.
certificateFormat, err := utils.CheckCertificateFormatFlag(req.compatibility)
if err != nil {
return nil, trace.Wrap(err)
}
if certificateFormat == teleport.CertificateFormatUnspecified {
certificateFormat = req.checker.CertificateFormat()
}
var sessionTTL time.Duration
var allowedLogins []string
// If the role TTL is ignored, do not restrict session TTL and allowed logins.
// The only caller setting this parameter should be "tctl auth sign".
// Otherwise set the session TTL to the smallest of all roles and
// then only grant access to allowed logins based on that.
if req.overrideRoleTTL {
// Take whatever was passed in. Pass in 0 to CheckLoginDuration so all
// logins are returned for the role set.
sessionTTL = req.ttl
allowedLogins, err = req.checker.CheckLoginDuration(0)
if err != nil {
return nil, trace.Wrap(err)
}
} else {
// Adjust session TTL to the smaller of two values: the session TTL
// requested in tsh or the session TTL for the role.
sessionTTL = req.checker.AdjustSessionTTL(req.ttl)
// Return a list of logins that meet the session TTL limit. This means if
// the requested session TTL is larger than the max session TTL for a login,
// that login will not be included in the list of allowed logins.
allowedLogins, err = req.checker.CheckLoginDuration(sessionTTL)
if err != nil {
return nil, trace.Wrap(err)
}
}
clusterName, err := a.GetDomainName()
if err != nil {
return nil, trace.Wrap(err)
}
// Load the user CA with its signing keys to sign both certificates.
ca, err := a.Trust.GetCertAuthority(services.CertAuthID{
Type: services.UserCA,
DomainName: clusterName,
}, true)
if err != nil {
return nil, trace.Wrap(err)
}
privateKey, err := ca.FirstSigningKey()
if err != nil {
return nil, trace.Wrap(err)
}
sshCert, err := a.Authority.GenerateUserCert(services.UserCertParams{
PrivateCASigningKey: privateKey,
CASigningAlg: ca.GetSigningAlg(),
PublicUserKey: req.publicKey,
Username: req.user.GetName(),
AllowedLogins: allowedLogins,
TTL: sessionTTL,
Roles: req.checker.RoleNames(),
CertificateFormat: certificateFormat,
PermitPortForwarding: req.checker.CanPortForward(),
PermitAgentForwarding: req.checker.CanForwardAgents(),
PermitX11Forwarding: req.checker.PermitX11Forwarding(),
RouteToCluster: req.routeToCluster,
Traits: req.traits,
ActiveRequests: req.activeRequests,
})
if err != nil {
return nil, trace.Wrap(err)
}
kubeGroups, kubeUsers, err := req.checker.CheckKubeGroupsAndUsers(sessionTTL, req.overrideRoleTTL)
// NotFound errors are acceptable - this user may have no k8s access
// granted and that shouldn't prevent us from issuing a TLS cert.
if err != nil && !trace.IsNotFound(err) {
return nil, trace.Wrap(err)
}
// Only validate/default kubernetes cluster name for the current teleport
// cluster. If this cert is targeting a trusted teleport cluster, leave all
// the kubernetes cluster validation up to them.
if req.routeToCluster == "" || req.routeToCluster == clusterName {
req.kubernetesCluster, err = kubeutils.CheckOrSetKubeCluster(a.closeCtx, a.Presence, req.kubernetesCluster, clusterName)
if err != nil {
if !trace.IsNotFound(err) {
return nil, trace.Wrap(err)
}
log.WithError(err).Warning("Failed setting default kubernetes cluster for user login (user did not provide a cluster); leaving KubernetesCluster extension in the TLS certificate empty")
}
}
// generate TLS certificate
tlsAuthority, err := ca.TLSCA()
if err != nil {
return nil, trace.Wrap(err)
}
identity := tlsca.Identity{
Username: req.user.GetName(),
Groups: req.checker.RoleNames(),
Principals: allowedLogins,
Usage: req.usage,
RouteToCluster: req.routeToCluster,
KubernetesCluster: req.kubernetesCluster,
Traits: req.traits,
KubernetesGroups: kubeGroups,
KubernetesUsers: kubeUsers,
RouteToApp: tlsca.RouteToApp{
SessionID: req.appSessionID,
PublicAddr: req.appPublicAddr,
ClusterName: req.appClusterName,
},
TeleportCluster: clusterName,
}
subject, err := identity.Subject()
if err != nil {
return nil, trace.Wrap(err)
}
certRequest := tlsca.CertificateRequest{
Clock: a.clock,
PublicKey: cryptoPubKey,
Subject: subject,
NotAfter: a.clock.Now().UTC().Add(sessionTTL),
}
tlsCert, err := tlsAuthority.GenerateCertificate(certRequest)
if err != nil {
return nil, trace.Wrap(err)
}
return &certs{ssh: sshCert, tls: tlsCert}, nil
}
// WithUserLock executes function authenticateFn that performs user authentication.
// If authenticateFn returns a non-nil error, the login attempt will be logged as failed.
// The only exception to this rule is ConnectionProblemError: in case it occurs,
// access will be denied, but the login attempt will not be recorded —
// this is done to avoid potential user lockouts due to backend failures.
// In case the user exceeds defaults.MaxLoginAttempts,
// the user account will be locked for defaults.AccountLockInterval.
func (a *Server) WithUserLock(username string, authenticateFn func() error) error {
	user, err := a.Identity.GetUser(username, false)
	if err != nil {
		if trace.IsNotFound(err) {
			// If user is not found, still call authenticateFn. It should
			// always return an error. This prevents username oracles and
			// timing attacks.
			return authenticateFn()
		}
		return trace.Wrap(err)
	}
	status := user.GetStatus()
	if status.IsLocked && status.LockExpires.After(a.clock.Now().UTC()) {
		return trace.AccessDenied("%v exceeds %v failed login attempts, locked until %v",
			user.GetName(), defaults.MaxLoginAttempts, utils.HumanTimeFormat(status.LockExpires))
	}
	fnErr := authenticateFn()
	if fnErr == nil {
		// Upon successful login, reset the failed attempt counter. A NotFound
		// error only means there were no recorded attempts to delete.
		err = a.DeleteUserLoginAttempts(username)
		if err != nil && !trace.IsNotFound(err) {
			return trace.Wrap(err)
		}
		return nil
	}
	// Do not lock the user in case the backend is flaky or down. Note: this
	// must inspect the authentication error (fnErr) — the previous code
	// checked `err`, which is always nil at this point.
	if trace.IsConnectionProblem(fnErr) {
		return trace.Wrap(fnErr)
	}
	// log failed attempt and possibly lock user
	attempt := services.LoginAttempt{Time: a.clock.Now().UTC(), Success: false}
	err = a.AddUserLoginAttempt(username, attempt, defaults.AttemptTTL)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	loginAttempts, err := a.Identity.GetUserLoginAttempts(username)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	if !services.LastFailed(defaults.MaxLoginAttempts, loginAttempts) {
		log.Debugf("%v user has less than %v failed login attempts", username, defaults.MaxLoginAttempts)
		return trace.Wrap(fnErr)
	}
	lockUntil := a.clock.Now().UTC().Add(defaults.AccountLockInterval)
	// Report the newly computed lock expiry (lockUntil), not the stale
	// status.LockExpires from before this attempt.
	message := fmt.Sprintf("%v exceeds %v failed login attempts, locked until %v",
		username, defaults.MaxLoginAttempts, utils.HumanTimeFormat(lockUntil))
	log.Debug(message)
	user.SetLocked(lockUntil, "user has exceeded maximum failed login attempts")
	err = a.Identity.UpsertUser(user)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	return trace.AccessDenied(message)
}
// PreAuthenticatedSignIn is for 2-way authentication methods like U2F where the password is
// already checked before issuing the second factor challenge
func (a *Server) PreAuthenticatedSignIn(user string, identity tlsca.Identity) (services.WebSession, error) {
	// Derive roles and traits from the caller's verified identity.
	roles, traits, err := services.ExtractFromIdentity(a, identity)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	session, err := a.NewWebSession(user, roles, traits)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = a.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	// Strip secrets before handing the session back to the caller.
	return session.WithoutSecrets(), nil
}
// U2FSignRequest checks the user's password (under the account-lock policy)
// and, on success, issues a new U2F sign challenge for that user.
func (a *Server) U2FSignRequest(user string, password []byte) (*u2f.SignRequest, error) {
	authPref, err := a.GetAuthPreference()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Fails if U2F is not configured for this cluster.
	u2fSettings, err := authPref.GetU2F()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Verify the password first; failed attempts count toward lockout.
	if err = a.WithUserLock(user, func() error {
		return a.CheckPasswordWOToken(user, password)
	}); err != nil {
		return nil, trace.Wrap(err)
	}
	registration, err := a.GetU2FRegistration(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	challenge, err := u2f.NewChallenge(u2fSettings.AppID, u2fSettings.Facets)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Persist the challenge so CheckU2FSignResponse can validate the reply.
	if err = a.UpsertU2FSignChallenge(user, challenge); err != nil {
		return nil, trace.Wrap(err)
	}
	return challenge.SignRequest(*registration), nil
}
// CheckU2FSignResponse validates a U2F sign response against the stored
// registration, counter, and previously issued challenge for the user.
func (a *Server) CheckU2FSignResponse(user string, response *u2f.SignResponse) error {
	// Before checking the response, make sure U2F is actually set up on the backend.
	authPref, err := a.GetAuthPreference()
	if err != nil {
		return trace.Wrap(err)
	}
	if _, err = authPref.GetU2F(); err != nil {
		return trace.Wrap(err)
	}
	registration, err := a.GetU2FRegistration(user)
	if err != nil {
		return trace.Wrap(err)
	}
	counter, err := a.GetU2FRegistrationCounter(user)
	if err != nil {
		return trace.Wrap(err)
	}
	challenge, err := a.GetU2FSignChallenge(user)
	if err != nil {
		return trace.Wrap(err)
	}
	// Authenticate returns the device's updated counter on success.
	newCounter, err := registration.Authenticate(*response, *challenge, counter)
	if err != nil {
		return trace.Wrap(err)
	}
	// Persist the new counter so stale/replayed responses are rejected later.
	return trace.Wrap(a.UpsertU2FRegistrationCounter(user, newCounter))
}
// ExtendWebSession creates a new web session for a user based on a valid previous sessionID.
// Additional roles are appended to initial roles if there is an approved access request.
func (a *Server) ExtendWebSession(user, prevSessionID, accessRequestID string, identity tlsca.Identity) (services.WebSession, error) {
	prevSession, err := a.GetWebSession(user, prevSessionID)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Consider the absolute expiry time that may be set for this session by an
	// external identity service; such a session cannot be renewed without
	// extra renewal logic against the external OIDC provider.
	expiresAt := prevSession.GetExpiryTime()
	if !expiresAt.IsZero() && expiresAt.Before(a.clock.Now().UTC()) {
		return nil, trace.NotFound("web session has expired")
	}
	roles, traits, err := services.ExtractFromIdentity(a, identity)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if accessRequestID != "" {
		requestedRoles, requestExpiry, err := a.getRolesAndExpiryFromAccessRequest(user, accessRequestID)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// Merge requested roles in, dropping duplicates.
		roles = utils.Deduplicate(append(roles, requestedRoles...))
		// Let session expire with access request expiry.
		expiresAt = requestExpiry
	}
	session, err := a.NewWebSession(user, roles, traits)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	session.SetExpiryTime(expiresAt)
	// The bearer token never outlives the session itself.
	bearerTTL := utils.MinTTL(utils.ToTTL(a.clock, expiresAt), BearerTokenTTL)
	session.SetBearerTokenExpiryTime(a.clock.Now().UTC().Add(bearerTTL))
	if err = a.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	session, err = services.GetWebSessionMarshaler().ExtendWebSession(session)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// getRolesAndExpiryFromAccessRequest looks up an approved access request for
// the user and returns the roles it grants together with its access expiry.
func (a *Server) getRolesAndExpiryFromAccessRequest(user, accessRequestID string) ([]string, time.Time, error) {
	filter := services.AccessRequestFilter{
		User: user,
		ID:   accessRequestID,
	}
	reqs, err := a.GetAccessRequests(context.TODO(), filter)
	if err != nil {
		return nil, time.Time{}, trace.Wrap(err)
	}
	if len(reqs) < 1 {
		return nil, time.Time{}, trace.NotFound("access request %q not found", accessRequestID)
	}
	req := reqs[0]
	state := req.GetState()
	// Only approved requests grant roles; report why otherwise.
	if !state.IsApproved() {
		if state.IsDenied() {
			return nil, time.Time{}, trace.AccessDenied("access request %q has been denied", accessRequestID)
		}
		return nil, time.Time{}, trace.BadParameter("access request %q is awaiting approval", accessRequestID)
	}
	if err = services.ValidateAccessRequest(a, req); err != nil {
		return nil, time.Time{}, trace.Wrap(err)
	}
	expiry := req.GetAccessExpiry()
	if expiry.Before(a.GetClock().Now()) {
		return nil, time.Time{}, trace.BadParameter("access request %q has expired", accessRequestID)
	}
	return req.GetRoles(), expiry, nil
}
// CreateWebSession creates a new web session for user without any
// checks, is used by admins
func (a *Server) CreateWebSession(user string) (services.WebSession, error) {
	u, err := a.GetUser(user, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Build the session with the user's own roles and traits.
	session, err := a.NewWebSession(user, u.GetRoles(), u.GetTraits())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = a.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	session, err = services.GetWebSessionMarshaler().GenerateWebSession(session)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// GenerateTokenRequest is a request to generate auth token
type GenerateTokenRequest struct {
	// Token if provided sets the token value, otherwise will be auto generated
	// by CheckAndSetDefaults (cryptographically random hex).
	Token string `json:"token"`
	// Roles is a list of roles this token authenticates as
	Roles teleport.Roles `json:"roles"`
	// TTL is a time to live for token; zero means CheckAndSetDefaults
	// will apply defaults.ProvisioningTokenTTL.
	TTL time.Duration `json:"ttl"`
	// Labels sets token labels, e.g. {env: prod, region: us-west}.
	// Labels are later passed to resources that are joining
	// e.g. remote clusters and in the future versions, nodes and proxies.
	Labels map[string]string `json:"labels"`
}
// CheckAndSetDefaults checks and sets default values of request
func (req *GenerateTokenRequest) CheckAndSetDefaults() error {
	// Every requested system role must be a valid one.
	for _, r := range req.Roles {
		if err := r.Check(); err != nil {
			return trace.Wrap(err)
		}
	}
	// Fall back to the default provisioning TTL when none was given.
	if req.TTL == 0 {
		req.TTL = defaults.ProvisioningTokenTTL
	}
	// Auto-generate a cryptographically random token value when absent.
	if req.Token == "" {
		randomToken, err := utils.CryptoRandomHex(TokenLenBytes)
		if err != nil {
			return trace.Wrap(err)
		}
		req.Token = randomToken
	}
	return nil
}
// GenerateToken generates multi-purpose authentication token.
func (a *Server) GenerateToken(ctx context.Context, req GenerateTokenRequest) (string, error) {
	if err := req.CheckAndSetDefaults(); err != nil {
		return "", trace.Wrap(err)
	}
	expires := a.clock.Now().UTC().Add(req.TTL)
	token, err := services.NewProvisionToken(req.Token, req.Roles, expires)
	if err != nil {
		return "", trace.Wrap(err)
	}
	// Attach caller-supplied labels, if any, to the token metadata.
	if len(req.Labels) != 0 {
		meta := token.GetMetadata()
		meta.Labels = req.Labels
		token.SetMetadata(meta)
	}
	if err := a.Provisioner.UpsertToken(token); err != nil {
		return "", trace.Wrap(err)
	}
	// Creating a trusted-cluster token is security sensitive — emit an
	// audit event for each such role in the request.
	user := clientUsername(ctx)
	for _, role := range req.Roles {
		if role != teleport.RoleTrustedCluster {
			continue
		}
		event := &events.TrustedClusterTokenCreate{
			Metadata: events.Metadata{
				Type: events.TrustedClusterTokenCreateEvent,
				Code: events.TrustedClusterTokenCreateCode,
			},
			UserMetadata: events.UserMetadata{
				User: user,
			},
		}
		if err := a.emitter.EmitAuditEvent(ctx, event); err != nil {
			log.WithError(err).Warn("Failed to emit trusted cluster token create event.")
		}
	}
	return req.Token, nil
}
// ExtractHostID returns host id based on the hostname
func ExtractHostID(hostName string, clusterName string) (string, error) {
	// Host names have the form "<host-id>.<cluster-name>".
	suffix := "." + clusterName
	if strings.HasSuffix(hostName, suffix) {
		return strings.TrimSuffix(hostName, suffix), nil
	}
	return "", trace.BadParameter("expected suffix %q in %q", suffix, hostName)
}
// HostFQDN consists of host UUID and cluster name joined via .
func HostFQDN(hostUUID, clusterName string) string {
	// Both arguments are strings, so %s is equivalent to %v here.
	return fmt.Sprintf("%s.%s", hostUUID, clusterName)
}
// GenerateServerKeysRequest is a request to generate server keys
type GenerateServerKeysRequest struct {
	// HostID is a unique ID of the host (required; validated by
	// CheckAndSetDefaults).
	HostID string `json:"host_id"`
	// NodeName is a user friendly host name
	NodeName string `json:"node_name"`
	// Roles is a list of roles assigned to node; exactly one role is
	// expected (validated by CheckAndSetDefaults).
	Roles teleport.Roles `json:"roles"`
	// AdditionalPrincipals is a list of additional principals
	// to include in OpenSSH and X509 certificates
	AdditionalPrincipals []string `json:"additional_principals"`
	// DNSNames is a list of DNS names
	// to include in the x509 client certificate
	DNSNames []string `json:"dns_names"`
	// PublicTLSKey is a PEM encoded public key
	// used for TLS setup
	PublicTLSKey []byte `json:"public_tls_key"`
	// PublicSSHKey is a SSH encoded public key,
	// if present will be signed as a return value
	// otherwise, new public/private key pair will be generated
	PublicSSHKey []byte `json:"public_ssh_key"`
	// RemoteAddr is the IP address of the remote host requesting a host
	// certificate. RemoteAddr is used to replace 0.0.0.0 in the list of
	// additional principals.
	RemoteAddr string `json:"remote_addr"`
	// Rotation allows clients to send the certificate authority rotation state
	// expected by client of the certificate authority backends, so auth servers
	// can avoid situation when clients request certs assuming one
	// state, and auth servers issue another
	Rotation *services.Rotation `json:"rotation,omitempty"`
	// NoCache is argument that only local callers can supply to bypass cache
	NoCache bool `json:"-"`
}
// CheckAndSetDefaults checks and sets default values
func (req *GenerateServerKeysRequest) CheckAndSetDefaults() error {
	if req.HostID == "" {
		return trace.BadParameter("missing parameter HostID")
	}
	// A host certificate is issued for exactly one system role.
	if n := len(req.Roles); n != 1 {
		return trace.BadParameter("expected only one system role, got %v", n)
	}
	return nil
}
// GenerateServerKeys generates new host private keys and certificates (signed
// by the host certificate authority) for a node.
func (a *Server) GenerateServerKeys(req GenerateServerKeysRequest) (*PackedKeys, error) {
	if err := req.CheckAndSetDefaults(); err != nil {
		return nil, trace.Wrap(err)
	}
	// Rate limit cert generation per requested system role; the slot is
	// released when this function returns.
	if err := a.limiter.AcquireConnection(req.Roles.String()); err != nil {
		generateThrottledRequestsCount.Inc()
		log.Debugf("Node %q [%v] is rate limited: %v.", req.NodeName, req.HostID, req.Roles)
		return nil, trace.Wrap(err)
	}
	defer a.limiter.ReleaseConnection(req.Roles.String())
	// only observe latencies for non-throttled requests
	start := a.clock.Now()
	defer generateRequestsLatencies.Observe(time.Since(start).Seconds())
	generateRequestsCount.Inc()
	generateRequestsCurrent.Inc()
	defer generateRequestsCurrent.Dec()
	clusterName, err := a.GetClusterName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// If the request contains 0.0.0.0, this implies an advertise IP was not
	// specified on the node. Try and guess what the address by replacing 0.0.0.0
	// with the RemoteAddr as known to the Auth Server.
	if utils.SliceContainsStr(req.AdditionalPrincipals, defaults.AnyAddress) {
		remoteHost, err := utils.Host(req.RemoteAddr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		req.AdditionalPrincipals = utils.ReplaceInSlice(
			req.AdditionalPrincipals,
			defaults.AnyAddress,
			remoteHost)
	}
	var cryptoPubKey crypto.PublicKey
	var privateKeyPEM, pubSSHKey []byte
	if req.PublicSSHKey != nil || req.PublicTLSKey != nil {
		// The caller supplied its own keys: validate them instead of
		// generating a fresh pair. privateKeyPEM stays empty in this branch.
		_, _, _, _, err := ssh.ParseAuthorizedKey(req.PublicSSHKey)
		if err != nil {
			return nil, trace.BadParameter("failed to parse SSH public key")
		}
		pubSSHKey = req.PublicSSHKey
		cryptoPubKey, err = tlsca.ParsePublicKeyPEM(req.PublicTLSKey)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	} else {
		// generate private key
		privateKeyPEM, pubSSHKey, err = a.GenerateKeyPair("")
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// reuse the same RSA keys for SSH and TLS keys
		cryptoPubKey, err = sshutils.CryptoPublicKey(pubSSHKey)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}
	// get the certificate authority that will be signing the public key of the host,
	// bypassing the cache when a local caller asked for it via NoCache.
	client := a.GetCache()
	if req.NoCache {
		client = &a.Services
	}
	ca, err := client.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: clusterName.GetClusterName(),
	}, true)
	if err != nil {
		return nil, trace.BadParameter("failed to load host CA for %q: %v", clusterName.GetClusterName(), err)
	}
	// could be a couple of scenarios, either client data is out of sync,
	// or auth server is out of sync, either way, for now check that
	// cache is out of sync, this will result in higher read rate
	// to the backend, which is a fine tradeoff
	if !req.NoCache && req.Rotation != nil && !req.Rotation.Matches(ca.GetRotation()) {
		log.Debugf("Client sent rotation state %v, cache state is %v, using state from the DB.", req.Rotation, ca.GetRotation())
		// Re-read the CA directly from the backend, skipping the cache.
		ca, err = a.GetCertAuthority(services.CertAuthID{
			Type:       services.HostCA,
			DomainName: clusterName.GetClusterName(),
		}, true)
		if err != nil {
			return nil, trace.BadParameter("failed to load host CA for %q: %v", clusterName.GetClusterName(), err)
		}
		if !req.Rotation.Matches(ca.GetRotation()) {
			return nil, trace.BadParameter("the client expected state is out of sync, server rotation state: %v, client rotation state: %v, re-register the client from scratch to fix the issue.", ca.GetRotation(), req.Rotation)
		}
	}
	tlsAuthority, err := ca.TLSCA()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// get the private key of the certificate authority
	caPrivateKey, err := ca.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// generate hostSSH certificate
	hostSSHCert, err := a.Authority.GenerateHostCert(services.HostCertParams{
		PrivateCASigningKey: caPrivateKey,
		CASigningAlg:        ca.GetSigningAlg(),
		PublicHostKey:       pubSSHKey,
		HostID:              req.HostID,
		NodeName:            req.NodeName,
		ClusterName:         clusterName.GetClusterName(),
		Roles:               req.Roles,
		Principals:          req.AdditionalPrincipals,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// generate host TLS certificate
	identity := tlsca.Identity{
		Username:        HostFQDN(req.HostID, clusterName.GetClusterName()),
		Groups:          req.Roles.StringSlice(),
		TeleportCluster: clusterName.GetClusterName(),
	}
	subject, err := identity.Subject()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	certRequest := tlsca.CertificateRequest{
		Clock:     a.clock,
		PublicKey: cryptoPubKey,
		Subject:   subject,
		NotAfter:  a.clock.Now().UTC().Add(defaults.CATTL),
		DNSNames:  append([]string{}, req.AdditionalPrincipals...),
	}
	// HTTPS requests need to specify DNS name that should be present in the
	// certificate as one of the DNS Names. It is not known in advance,
	// that is why there is a default one for all certificates
	if req.Roles.Include(teleport.RoleAuth) || req.Roles.Include(teleport.RoleAdmin) || req.Roles.Include(teleport.RoleApp) {
		certRequest.DNSNames = append(certRequest.DNSNames, "*."+teleport.APIDomain, teleport.APIDomain)
	}
	// Unlike additional principals, DNS Names is x509 specific and is limited
	// to services with TLS endpoints (e.g. auth, proxies, kubernetes)
	if req.Roles.Include(teleport.RoleAuth) || req.Roles.Include(teleport.RoleAdmin) || req.Roles.Include(teleport.RoleProxy) || req.Roles.Include(teleport.RoleKube) {
		certRequest.DNSNames = append(certRequest.DNSNames, req.DNSNames...)
	}
	hostTLSCert, err := tlsAuthority.GenerateCertificate(certRequest)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Key is empty when the caller provided its own public keys above.
	return &PackedKeys{
		Key:        privateKeyPEM,
		Cert:       hostSSHCert,
		TLSCert:    hostTLSCert,
		TLSCACerts: services.TLSCerts(ca),
		SSHCACerts: ca.GetCheckingKeys(),
	}, nil
}
// ValidateToken takes a provisioning token value and finds if it's valid. Returns
// a list of roles this token allows its owner to assume and token labels, or an error if the token
// cannot be found.
func (a *Server) ValidateToken(token string) (teleport.Roles, map[string]string, error) {
	staticTokens, err := a.GetCache().GetStaticTokens()
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	// Static tokens never expire; if the value matches one, return right
	// away. Constant-time comparison avoids leaking token bytes via timing.
	for _, static := range staticTokens.GetStaticTokens() {
		if subtle.ConstantTimeCompare([]byte(static.GetName()), []byte(token)) == 1 {
			return static.GetRoles(), nil, nil
		}
	}
	// Otherwise look for an ephemeral token in the backend and make sure
	// it has not expired.
	provisionToken, err := a.GetCache().GetToken(token)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	if !a.checkTokenTTL(provisionToken) {
		return nil, nil, trace.AccessDenied("token expired")
	}
	return provisionToken.GetRoles(), provisionToken.GetMetadata().Labels, nil
}
// checkTokenTTL checks if the token is still valid. If it is not, the token
// is removed from the backend and returns false. Otherwise returns true.
func (a *Server) checkTokenTTL(tok services.ProvisionToken) bool {
	if !tok.Expiry().Before(a.clock.Now().UTC()) {
		return true
	}
	// Best-effort cleanup of the expired token; NotFound just means some
	// other actor already deleted it.
	if err := a.DeleteToken(tok.GetName()); err != nil && !trace.IsNotFound(err) {
		log.Warnf("Unable to delete token from backend: %v.", err)
	}
	return false
}
// RegisterUsingTokenRequest is a request to register with
// auth server using authentication token
type RegisterUsingTokenRequest struct {
	// HostID is a unique host ID, usually a UUID (required; validated by
	// CheckAndSetDefaults).
	HostID string `json:"hostID"`
	// NodeName is a node name
	NodeName string `json:"node_name"`
	// Role is a system role, e.g. Proxy (validated by CheckAndSetDefaults).
	Role teleport.Role `json:"role"`
	// Token is an authentication token (required; validated by
	// CheckAndSetDefaults).
	Token string `json:"token"`
	// AdditionalPrincipals is a list of additional principals
	AdditionalPrincipals []string `json:"additional_principals"`
	// DNSNames is a list of DNS names to include in the x509 client certificate
	DNSNames []string `json:"dns_names"`
	// PublicTLSKey is a PEM encoded public key
	// used for TLS setup
	PublicTLSKey []byte `json:"public_tls_key"`
	// PublicSSHKey is a SSH encoded public key,
	// if present will be signed as a return value
	// otherwise, new public/private key pair will be generated
	PublicSSHKey []byte `json:"public_ssh_key"`
	// RemoteAddr is the remote address of the host requesting a host certificate.
	// It is used to replace 0.0.0.0 in the list of additional principals.
	RemoteAddr string `json:"remote_addr"`
}
// CheckAndSetDefaults checks for errors and sets defaults
func (r *RegisterUsingTokenRequest) CheckAndSetDefaults() error {
	switch {
	case r.HostID == "":
		return trace.BadParameter("missing parameter HostID")
	case r.Token == "":
		return trace.BadParameter("missing parameter Token")
	}
	// The requested system role must be one of the known roles.
	return trace.Wrap(r.Role.Check())
}
// RegisterUsingToken adds a new node to the Teleport cluster using previously issued token.
// A node must also request a specific role (and the role must match one of the roles
// the token was generated for).
//
// If a token was generated with a TTL, it gets enforced (can't register new nodes after TTL expires)
// If a token was generated with a TTL=0, it means it's a single-use token and it gets destroyed
// after a successful registration.
func (a *Server) RegisterUsingToken(req RegisterUsingTokenRequest) (*PackedKeys, error) {
	log.Infof("Node %q [%v] is trying to join with role: %v.", req.NodeName, req.HostID, req.Role)
	if err := req.CheckAndSetDefaults(); err != nil {
		return nil, trace.Wrap(err)
	}
	// make sure the token is valid
	roles, _, err := a.ValidateToken(req.Token)
	if err != nil {
		log.Warningf("%q [%v] can not join the cluster with role %s, token error: %v", req.NodeName, req.HostID, req.Role, err)
		// Pass the format string and arguments directly instead of a
		// pre-built fmt.Sprintf result: a pre-formatted message would be
		// re-interpreted as a format string, mangling any '%' contained
		// in node names or host IDs.
		return nil, trace.AccessDenied("%q [%v] can not join the cluster with role %s, the token is not valid", req.NodeName, req.HostID, req.Role)
	}
	// make sure the caller is requesting a role allowed by the token
	if !roles.Include(req.Role) {
		log.Warnf("node %q [%v] can not join the cluster, the token does not allow %q role", req.NodeName, req.HostID, req.Role)
		return nil, trace.BadParameter("node %q [%v] can not join the cluster, the token does not allow %q role", req.NodeName, req.HostID, req.Role)
	}
	// generate and return host certificate and keys
	keys, err := a.GenerateServerKeys(GenerateServerKeysRequest{
		HostID:               req.HostID,
		NodeName:             req.NodeName,
		Roles:                teleport.Roles{req.Role},
		AdditionalPrincipals: req.AdditionalPrincipals,
		PublicTLSKey:         req.PublicTLSKey,
		PublicSSHKey:         req.PublicSSHKey,
		RemoteAddr:           req.RemoteAddr,
		DNSNames:             req.DNSNames,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	log.Infof("Node %q [%v] has joined the cluster.", req.NodeName, req.HostID)
	return keys, nil
}
// RegisterNewAuthServer validates that the given token carries the Auth role
// and then deletes it, since auth-server join tokens are single use.
func (a *Server) RegisterNewAuthServer(token string) error {
	provisionToken, err := a.Provisioner.GetToken(token)
	if err != nil {
		return trace.Wrap(err)
	}
	if !provisionToken.GetRoles().Include(teleport.RoleAuth) {
		return trace.AccessDenied("role does not match")
	}
	// Burn the token on use.
	return trace.Wrap(a.DeleteToken(token))
}
// DeleteToken deletes a dynamic token (reset-password or provisioning).
// Statically configured tokens cannot be removed at runtime.
func (a *Server) DeleteToken(token string) (err error) {
	tkns, err := a.GetStaticTokens()
	if err != nil {
		return trace.Wrap(err)
	}
	// Static tokens live in the config file; refuse to delete them.
	// Constant-time comparison avoids leaking token bytes via timing.
	for _, static := range tkns.GetStaticTokens() {
		if subtle.ConstantTimeCompare([]byte(static.GetName()), []byte(token)) == 1 {
			return trace.BadParameter("token %s is statically configured and cannot be removed", token)
		}
	}
	// Try each dynamic token store in turn; the first successful delete wins.
	if err = a.Identity.DeleteResetPasswordToken(context.TODO(), token); err == nil {
		return nil
	}
	if err = a.Provisioner.DeleteToken(token); err == nil {
		return nil
	}
	// Neither store had it: surface the last error.
	return trace.Wrap(err)
}
// GetTokens returns all tokens (machine provisioning ones and user invitation tokens). Machine
// tokens usually have "node roles", like auth,proxy,node and user invitation tokens have 'signup' role
func (a *Server) GetTokens(opts ...services.MarshalOption) (tokens []services.ProvisionToken, err error) {
	// Dynamic node provisioning tokens come first.
	tokens, err = a.Provisioner.GetTokens()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Static tokens are optional; NotFound just means none are configured.
	staticTokens, err := a.GetStaticTokens()
	switch {
	case err == nil:
		tokens = append(tokens, staticTokens.GetStaticTokens()...)
	case !trace.IsNotFound(err):
		return nil, trace.Wrap(err)
	}
	// Reset-password tokens are surfaced as provisioning tokens carrying
	// the 'signup' role.
	resetTokens, err := a.Identity.GetResetPasswordTokens(context.TODO())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	for _, t := range resetTokens {
		converted, err := services.NewProvisionToken(t.GetName(), teleport.Roles{teleport.RoleSignup}, t.Expiry())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		tokens = append(tokens, converted)
	}
	return tokens, nil
}
// NewWebSession builds a web session for the user: it issues SSH/TLS user
// certificates for the given roles and traits and wraps them together with
// fresh session and bearer tokens.
func (a *Server) NewWebSession(username string, roles []string, traits wrappers.Traits) (services.WebSession, error) {
	user, err := a.GetUser(username, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	checker, err := services.FetchRoles(roles, a.Access, traits)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	priv, pub, err := a.GetNewKeyPairFromPool()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Cap the session TTL by the most restrictive role.
	sessionTTL := checker.AdjustSessionTTL(defaults.CertDuration)
	certs, err := a.generateUserCert(certRequest{
		user:      user,
		ttl:       sessionTTL,
		publicKey: pub,
		checker:   checker,
		traits:    traits,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	sessionToken, err := utils.CryptoRandomHex(SessionTokenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	bearerToken, err := utils.CryptoRandomHex(SessionTokenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// The bearer token expires no later than the session itself.
	bearerTokenTTL := utils.MinTTL(sessionTTL, BearerTokenTTL)
	spec := services.WebSessionSpecV2{
		User:               user.GetName(),
		Priv:               priv,
		Pub:                certs.ssh,
		TLSCert:            certs.tls,
		Expires:            a.clock.Now().UTC().Add(sessionTTL),
		BearerToken:        bearerToken,
		BearerTokenExpires: a.clock.Now().UTC().Add(bearerTokenTTL),
	}
	return services.NewWebSession(sessionToken, services.KindWebSession, services.KindWebSession, spec), nil
}
// UpsertWebSession creates or updates the given web session for the user,
// keyed by the session's name.
func (a *Server) UpsertWebSession(user string, sess services.WebSession) error {
	return a.Identity.UpsertWebSession(user, sess.GetName(), sess)
}
// GetWebSession returns the web session with the given id for the user.
// Unlike GetWebSessionInfo, the session is returned as stored, without
// stripping secrets.
func (a *Server) GetWebSession(userName string, id string) (services.WebSession, error) {
	return a.Identity.GetWebSession(userName, id)
}
// GetWebSessionInfo returns the web session for the user with secrets
// stripped out.
func (a *Server) GetWebSessionInfo(userName string, id string) (services.WebSession, error) {
	session, err := a.Identity.GetWebSession(userName, id)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// DeleteNamespace deletes a namespace, refusing to delete the default
// namespace or any namespace that still has nodes registered in it.
func (a *Server) DeleteNamespace(namespace string) error {
	if namespace == defaults.Namespace {
		return trace.AccessDenied("can't delete default namespace")
	}
	nodes, err := a.Presence.GetNodes(namespace, services.SkipValidation())
	if err != nil {
		return trace.Wrap(err)
	}
	if n := len(nodes); n != 0 {
		return trace.BadParameter("can't delete namespace %v that has %v registered nodes", namespace, n)
	}
	return a.Presence.DeleteNamespace(namespace)
}
// DeleteWebSession deletes the web session with the given id for the user.
func (a *Server) DeleteWebSession(user string, id string) error {
	return trace.Wrap(a.Identity.DeleteWebSession(user, id))
}
// NewWatcher returns a new event watcher. In case of an auth server
// this watcher will return events as seen by the auth server's
// in memory cache, not the backend. The watcher is bound to ctx.
func (a *Server) NewWatcher(ctx context.Context, watch services.Watch) (services.Watcher, error) {
	return a.GetCache().NewWatcher(ctx, watch)
}
// DeleteRole deletes a role by name of the role. The role must not be
// referenced by any user or by any user cert authority; an audit event is
// emitted (best effort) after a successful delete.
func (a *Server) DeleteRole(ctx context.Context, name string) error {
	// check if this role is used by CA or Users
	users, err := a.Identity.GetUsers(false)
	if err != nil {
		return trace.Wrap(err)
	}
	for _, u := range users {
		for _, r := range u.GetRoles() {
			if r == name {
				// Mask the actual error here as it could be used to enumerate users
				// within the system.
				log.Warnf("Failed to delete role: role %v is used by user %v.", name, u.GetName())
				return trace.BadParameter("failed to delete role that still in use by a user. Check system server logs for more details.")
			}
		}
	}
	// check if it's used by some external cert authorities, e.g.
	// cert authorities related to external cluster
	cas, err := a.Trust.GetCertAuthorities(services.UserCA, false)
	if err != nil {
		return trace.Wrap(err)
	}
	// Use a distinct loop variable name (ca) — the previous code shadowed
	// the receiver `a` here, which is error-prone.
	for _, ca := range cas {
		for _, r := range ca.GetRoles() {
			if r == name {
				// Mask the actual error here as it could be used to enumerate users
				// within the system.
				log.Warnf("Failed to delete role: role %v is used by user cert authority %v", name, ca.GetClusterName())
				return trace.BadParameter("failed to delete role that still in use by a user. Check system server logs for more details.")
			}
		}
	}
	if err := a.Access.DeleteRole(ctx, name); err != nil {
		return trace.Wrap(err)
	}
	// Emit the audit event best effort: a failure to emit does not undo or
	// fail the delete itself.
	err = a.emitter.EmitAuditEvent(a.closeCtx, &events.RoleDelete{
		Metadata: events.Metadata{
			Type: events.RoleDeletedEvent,
			Code: events.RoleDeletedCode,
		},
		UserMetadata: events.UserMetadata{
			User: clientUsername(ctx),
		},
		ResourceMetadata: events.ResourceMetadata{
			Name: name,
		},
	})
	if err != nil {
		log.WithError(err).Warnf("Failed to emit role deleted event.")
	}
	return nil
}
// upsertRole creates or updates a role and emits a role-create audit event.
// (Comment fixed: the function is unexported upsertRole, not UpsertRole.)
func (a *Server) upsertRole(ctx context.Context, role services.Role) error {
	if err := a.UpsertRole(ctx, role); err != nil {
		return trace.Wrap(err)
	}
	// Emit the audit event best effort: a failure to emit is logged but
	// does not fail the upsert.
	err := a.emitter.EmitAuditEvent(a.closeCtx, &events.RoleCreate{
		Metadata: events.Metadata{
			Type: events.RoleCreatedEvent,
			Code: events.RoleCreatedCode,
		},
		UserMetadata: events.UserMetadata{
			User: clientUsername(ctx),
		},
		ResourceMetadata: events.ResourceMetadata{
			Name: role.GetName(),
		},
	})
	if err != nil {
		log.WithError(err).Warnf("Failed to emit role create event.")
	}
	return nil
}
// CreateAccessRequest validates an access request, stamps its creation and
// expiry times, stores it in the dynamic access backend, and emits an audit
// event for the creation.
func (a *Server) CreateAccessRequest(ctx context.Context, req services.AccessRequest) error {
	err := services.ValidateAccessRequest(a, req,
		// if request is in state pending, role expansion must be applied
		services.ExpandRoles(req.GetState().IsPending()),
		// always apply system annotations before storing new requests
		services.ApplySystemAnnotations(true),
	)
	if err != nil {
		return trace.Wrap(err)
	}
	ttl, err := a.calculateMaxAccessTTL(req)
	if err != nil {
		return trace.Wrap(err)
	}
	now := a.clock.Now().UTC()
	req.SetCreationTime(now)
	exp := now.Add(ttl)
	// Set access expiry if an allowable default was not provided.
	if req.GetAccessExpiry().Before(now) || req.GetAccessExpiry().After(exp) {
		req.SetAccessExpiry(exp)
	}
	// By default, resource expiry should match access expiry.
	req.SetExpiry(req.GetAccessExpiry())
	// If the access-request is in a pending state, then the expiry of the underlying resource
	// is capped to PendingAccessDuration in order to limit orphaned access requests.
	if req.GetState().IsPending() {
		pexp := now.Add(defaults.PendingAccessDuration)
		if pexp.Before(req.Expiry()) {
			req.SetExpiry(pexp)
		}
	}
	if err := a.DynamicAccess.CreateAccessRequest(ctx, req); err != nil {
		return trace.Wrap(err)
	}
	// Emit the creation audit event. Note: if emitting fails, the wrapped
	// error is returned even though the request has already been stored.
	err = a.emitter.EmitAuditEvent(a.closeCtx, &events.AccessRequestCreate{
		Metadata: events.Metadata{
			Type: events.AccessRequestCreateEvent,
			Code: events.AccessRequestCreateCode,
		},
		UserMetadata: events.UserMetadata{
			User: req.GetUser(),
		},
		Roles:        req.GetRoles(),
		RequestID:    req.GetName(),
		RequestState: req.GetState().String(),
		Reason:       req.GetRequestReason(),
	})
	return trace.Wrap(err)
}
// SetAccessRequestState updates the state of an access request (e.g. approve
// or deny) and emits an audit event describing the update.
func (a *Server) SetAccessRequestState(ctx context.Context, params services.AccessRequestUpdate) error {
	if err := a.DynamicAccess.SetAccessRequestState(ctx, params); err != nil {
		return trace.Wrap(err)
	}
	event := &events.AccessRequestCreate{
		Metadata: events.Metadata{
			Type: events.AccessRequestUpdateEvent,
			Code: events.AccessRequestUpdateCode,
		},
		ResourceMetadata: events.ResourceMetadata{
			UpdatedBy: clientUsername(ctx),
		},
		RequestID:    params.RequestID,
		RequestState: params.State.String(),
		Reason:       params.Reason,
		Roles:        params.Roles,
	}
	// Record the delegator (if any) that acted on behalf of the user.
	if delegator := getDelegator(ctx); delegator != "" {
		event.Delegator = delegator
	}
	// Annotations that fail to encode are dropped from the event; this is
	// deliberately non-fatal.
	if len(params.Annotations) > 0 {
		annotations, err := events.EncodeMapStrings(params.Annotations)
		if err != nil {
			log.WithError(err).Debugf("Failed to encode access request annotations.")
		} else {
			event.Annotations = annotations
		}
	}
	err := a.emitter.EmitAuditEvent(a.closeCtx, event)
	if err != nil {
		log.WithError(err).Warn("Failed to emit access request update event.")
	}
	// NOTE(review): unlike other emitters in this file, an emit failure here is
	// both logged and returned to the caller — confirm this is intentional.
	return trace.Wrap(err)
}
// calculateMaxAccessTTL determines the maximum allowable TTL for a given
// access request based on the MaxSessionTTLs of the roles being requested
// (an access request's life cannot exceed the smallest allowable
// MaxSessionTTL value of the roles that it requests).
func (a *Server) calculateMaxAccessTTL(req services.AccessRequest) (time.Duration, error) {
	smallest := defaults.MaxAccessDuration
	for _, name := range req.GetRoles() {
		role, err := a.GetRole(name)
		if err != nil {
			return 0, trace.Wrap(err)
		}
		if ttl := time.Duration(role.GetOptions().MaxSessionTTL); ttl > 0 && ttl < smallest {
			smallest = ttl
		}
	}
	return smallest, nil
}
// NewKeepAliver returns a new keep aliver backed directly by this auth
// server. The returned keep aliver runs until ctx or the auth server closes.
func (a *Server) NewKeepAliver(ctx context.Context) (services.KeepAliver, error) {
	kctx, cancel := context.WithCancel(ctx)
	aliver := &authKeepAliver{
		a:           a,
		ctx:         kctx,
		cancel:      cancel,
		keepAlivesC: make(chan services.KeepAlive),
	}
	go aliver.forwardKeepAlives()
	return aliver, nil
}
// GetCertAuthority returns the certificate authority with the given id.
// loadSigningKeys controls whether signing keys are loaded as well.
func (a *Server) GetCertAuthority(id services.CertAuthID, loadSigningKeys bool, opts ...services.MarshalOption) (services.CertAuthority, error) {
	cache := a.GetCache()
	return cache.GetCertAuthority(id, loadSigningKeys, opts...)
}
// GetCertAuthorities returns all authorities of the given type.
// loadSigningKeys controls whether signing keys are loaded as well.
func (a *Server) GetCertAuthorities(caType services.CertAuthType, loadSigningKeys bool, opts ...services.MarshalOption) ([]services.CertAuthority, error) {
	cache := a.GetCache()
	return cache.GetCertAuthorities(caType, loadSigningKeys, opts...)
}
// GetStaticTokens returns the static tokens used to provision nodes.
func (a *Server) GetStaticTokens() (services.StaticTokens, error) {
	cache := a.GetCache()
	return cache.GetStaticTokens()
}
// GetToken looks up a provision token by its ID.
func (a *Server) GetToken(token string) (services.ProvisionToken, error) {
	cache := a.GetCache()
	return cache.GetToken(token)
}
// GetRoles is a part of the auth.AccessPoint implementation.
func (a *Server) GetRoles() ([]services.Role, error) {
	cache := a.GetCache()
	return cache.GetRoles()
}
// GetRole is a part of the auth.AccessPoint implementation.
func (a *Server) GetRole(name string) (services.Role, error) {
	cache := a.GetCache()
	return cache.GetRole(name)
}
// GetNamespace returns the namespace with the given name.
func (a *Server) GetNamespace(name string) (*services.Namespace, error) {
	cache := a.GetCache()
	return cache.GetNamespace(name)
}
// GetNamespaces is a part of the auth.AccessPoint implementation.
func (a *Server) GetNamespaces() ([]services.Namespace, error) {
	cache := a.GetCache()
	return cache.GetNamespaces()
}
// GetNodes is a part of the auth.AccessPoint implementation.
func (a *Server) GetNodes(namespace string, opts ...services.MarshalOption) ([]services.Server, error) {
	cache := a.GetCache()
	return cache.GetNodes(namespace, opts...)
}
// GetReverseTunnels is a part of the auth.AccessPoint implementation.
func (a *Server) GetReverseTunnels(opts ...services.MarshalOption) ([]services.ReverseTunnel, error) {
	cache := a.GetCache()
	return cache.GetReverseTunnels(opts...)
}
// GetProxies is a part of the auth.AccessPoint implementation.
func (a *Server) GetProxies() ([]services.Server, error) {
	cache := a.GetCache()
	return cache.GetProxies()
}
// GetUser is a part of the auth.AccessPoint implementation.
func (a *Server) GetUser(name string, withSecrets bool) (user services.User, err error) {
	cache := a.GetCache()
	return cache.GetUser(name, withSecrets)
}
// GetUsers is a part of the auth.AccessPoint implementation.
func (a *Server) GetUsers(withSecrets bool) (users []services.User, err error) {
	cache := a.GetCache()
	return cache.GetUsers(withSecrets)
}
// GetTunnelConnections is a part of the auth.AccessPoint implementation.
// Tunnel connections are designed to be queried periodically and always
// return fresh data rather than relying on a recent cache.
func (a *Server) GetTunnelConnections(clusterName string, opts ...services.MarshalOption) ([]services.TunnelConnection, error) {
	cache := a.GetCache()
	return cache.GetTunnelConnections(clusterName, opts...)
}
// GetAllTunnelConnections is a part of the auth.AccessPoint implementation.
// Tunnel connections are designed to be queried periodically and always
// return fresh data rather than relying on a recent cache.
func (a *Server) GetAllTunnelConnections(opts ...services.MarshalOption) (conns []services.TunnelConnection, err error) {
	cache := a.GetCache()
	return cache.GetAllTunnelConnections(opts...)
}
// CreateAuditStream creates an audit event stream for the given session,
// selecting the streamer implementation from the session recording mode.
func (a *Server) CreateAuditStream(ctx context.Context, sid session.ID) (events.Stream, error) {
	s, err := a.modeStreamer()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return s.CreateAuditStream(ctx, sid)
}
// ResumeAuditStream resumes a previously created audit stream identified by
// sid and uploadID, selecting the streamer from the session recording mode.
func (a *Server) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (events.Stream, error) {
	s, err := a.modeStreamer()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return s.ResumeAuditStream(ctx, sid, uploadID)
}
// modeStreamer selects the streamer implementation based on the cluster's
// session recording mode.
func (a *Server) modeStreamer() (events.Streamer, error) {
	clusterConfig, err := a.GetClusterConfig()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// In sync mode the auth server forwards session control events to the
	// event log in addition to sending them and data events to the record
	// storage, hence the tee.
	if services.IsRecordSync(clusterConfig.GetSessionRecording()) {
		return events.NewTeeStreamer(a.streamer, a.emitter), nil
	}
	// In async mode, clients submit session control events during the session
	// in addition to writing a local session recording to be uploaded at the
	// end of the session, so forwarding events here would duplicate them.
	return a.streamer, nil
}
// GetAppServers is a part of the auth.AccessPoint implementation.
func (a *Server) GetAppServers(ctx context.Context, namespace string, opts ...services.MarshalOption) ([]services.Server, error) {
	cache := a.GetCache()
	return cache.GetAppServers(ctx, namespace, opts...)
}
// GetAppSession is a part of the auth.AccessPoint implementation.
func (a *Server) GetAppSession(ctx context.Context, req services.GetAppSessionRequest) (services.WebSession, error) {
	cache := a.GetCache()
	return cache.GetAppSession(ctx, req)
}
// authKeepAliver is a keep aliver that talks to the auth server directly
// (no network round trip). It forwards keep alives received on keepAlivesC
// until its context or the auth server is closed.
type authKeepAliver struct {
	// RWMutex guards err below.
	sync.RWMutex
	a      *Server
	ctx    context.Context
	cancel context.CancelFunc
	// keepAlivesC receives keep alive requests to forward to the auth server.
	keepAlivesC chan services.KeepAlive
	// err records the error that caused the keep aliver to close, if any.
	err error
}
// KeepAlives returns the channel accepting keep alive requests; sends are
// consumed by the forwardKeepAlives goroutine.
func (k *authKeepAliver) KeepAlives() chan<- services.KeepAlive {
	return k.keepAlivesC
}
// forwardKeepAlives pumps keep alives from keepAlivesC to the auth server
// until either the auth server or this keep aliver is closed. A failure to
// forward a keep alive closes the keep aliver and records the error.
func (k *authKeepAliver) forwardKeepAlives() {
	for {
		select {
		case <-k.a.closeCtx.Done():
			// Auth server is shutting down; close this keep aliver too.
			k.Close()
			return
		case <-k.ctx.Done():
			return
		case keepAlive := <-k.keepAlivesC:
			err := k.a.KeepAliveServer(k.ctx, keepAlive)
			if err != nil {
				k.closeWithError(err)
				return
			}
		}
	}
}
// closeWithError closes the keep aliver and records the error that can later
// be retrieved via Error.
// NOTE(review): Close (which cancels the context) runs before err is stored,
// so a goroutine woken by Done may briefly observe a nil Error — confirm this
// window is acceptable to callers.
func (k *authKeepAliver) closeWithError(err error) {
	k.Close()
	k.Lock()
	defer k.Unlock()
	k.err = err
}
// Error returns the error recorded when the keep aliver was closed, or nil
// if it closed cleanly (or is still running).
func (k *authKeepAliver) Error() error {
	k.RLock()
	defer k.RUnlock()
	return k.err
}
// Done returns a channel that is closed whenever the keep aliver is closed.
func (k *authKeepAliver) Done() <-chan struct{} {
	return k.ctx.Done()
}
// Close closes the keep aliver by cancelling its context, which stops the
// forwarding goroutine. It always returns nil.
func (k *authKeepAliver) Close() error {
	k.cancel()
	return nil
}
const (
	// BearerTokenTTL specifies how long a standard bearer token exists before
	// it has to be renewed by the client.
	BearerTokenTTL = 10 * time.Minute
	// TokenLenBytes is the length in bytes of the invite token.
	TokenLenBytes = 16
	// SessionTokenBytes is the number of bytes of a web or application session.
	SessionTokenBytes = 32
)
// oidcClient is an internal structure that pairs an OIDC client with the
// config it was built from, so config changes can be detected.
type oidcClient struct {
	client *oidc.Client
	config oidc.ClientConfig
}
// samlProvider is an internal structure that pairs a SAML service provider
// with the connector it was built from.
type samlProvider struct {
	provider  *saml2.SAMLServiceProvider
	connector services.SAMLConnector
}
// githubClient is an internal structure that pairs a Github OAuth2 client
// with the config it was built from.
type githubClient struct {
	client *oauth2.Client
	config oauth2.Config
}
// oidcConfigsEqual returns true if the provided OIDC configs are equal,
// comparing redirect URL, credentials, and the ordered scope list.
func oidcConfigsEqual(a, b oidc.ClientConfig) bool {
	switch {
	case a.RedirectURL != b.RedirectURL:
		return false
	case a.Credentials.ID != b.Credentials.ID:
		return false
	case a.Credentials.Secret != b.Credentials.Secret:
		return false
	case len(a.Scope) != len(b.Scope):
		return false
	}
	for i, scope := range a.Scope {
		if scope != b.Scope[i] {
			return false
		}
	}
	return true
}
// oauth2ConfigsEqual returns true if the provided OAuth2 configs are equal,
// comparing credentials, URLs, auth method, and the ordered scope list.
func oauth2ConfigsEqual(a, b oauth2.Config) bool {
	switch {
	case a.Credentials.ID != b.Credentials.ID:
		return false
	case a.Credentials.Secret != b.Credentials.Secret:
		return false
	case a.RedirectURL != b.RedirectURL:
		return false
	case len(a.Scope) != len(b.Scope):
		return false
	}
	for i, scope := range a.Scope {
		if scope != b.Scope[i] {
			return false
		}
	}
	switch {
	case a.AuthURL != b.AuthURL:
		return false
	case a.TokenURL != b.TokenURL:
		return false
	case a.AuthMethod != b.AuthMethod:
		return false
	}
	return true
}
// isHTTPS verifies that the given URL parses and uses the https scheme,
// returning a BadParameter error otherwise.
func isHTTPS(u string) error {
	parsed, err := url.Parse(u)
	if err != nil {
		return trace.Wrap(err)
	}
	if scheme := parsed.Scheme; scheme != "https" {
		return trace.BadParameter("expected scheme https, got %q", scheme)
	}
	return nil
}
// init registers the key-generation Prometheus collectors declared above so
// they are exposed on the metrics endpoint.
func init() {
	// Metrics have to be registered to be exposed:
	prometheus.MustRegister(generateRequestsCount)
	prometheus.MustRegister(generateThrottledRequestsCount)
	prometheus.MustRegister(generateRequestsCurrent)
	prometheus.MustRegister(generateRequestsLatencies)
}
Change log about missing kube clusters on login to debug (#4935)
This is a perfectly normal situation in clusters without k8s integration,
so it shouldn't be logged as a warning.
/*
Copyright 2015-2019 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package auth implements certificate signing authority and access control server
// Authority server is composed of several parts:
//
// * Authority server itself that implements signing and acl logic
// * HTTP server wrapper for authority server
// * HTTP client wrapper
//
package auth
import (
"context"
"crypto"
"crypto/subtle"
"fmt"
"math/rand"
"net/url"
"strings"
"sync"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
kubeutils "github.com/gravitational/teleport/lib/kube/utils"
"github.com/gravitational/teleport/lib/limiter"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/services/local"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/sshca"
"github.com/gravitational/teleport/lib/sshutils"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/lib/wrappers"
"github.com/pborman/uuid"
"github.com/coreos/go-oidc/oauth2"
"github.com/coreos/go-oidc/oidc"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/prometheus/client_golang/prometheus"
saml2 "github.com/russellhaering/gosaml2"
"github.com/tstranex/u2f"
"golang.org/x/crypto/ssh"
)
// ServerOption allows setting options as functional arguments to Server
// (applied by NewServer after the built-in defaults).
type ServerOption func(*Server)
// NewServer creates and configures a new Server instance. Any service left
// nil in cfg is defaulted to its local backend-backed implementation, and
// audit sinks default to discarding implementations. Options are applied
// after construction; the clock defaults to a real clock if none was set.
func NewServer(cfg *InitConfig, opts ...ServerOption) (*Server, error) {
	// Default all optional services to local, backend-backed implementations.
	if cfg.Trust == nil {
		cfg.Trust = local.NewCAService(cfg.Backend)
	}
	if cfg.Presence == nil {
		cfg.Presence = local.NewPresenceService(cfg.Backend)
	}
	if cfg.Provisioner == nil {
		cfg.Provisioner = local.NewProvisioningService(cfg.Backend)
	}
	if cfg.Identity == nil {
		cfg.Identity = local.NewIdentityService(cfg.Backend)
	}
	if cfg.Access == nil {
		cfg.Access = local.NewAccessService(cfg.Backend)
	}
	if cfg.DynamicAccess == nil {
		cfg.DynamicAccess = local.NewDynamicAccessService(cfg.Backend)
	}
	if cfg.ClusterConfiguration == nil {
		cfg.ClusterConfiguration = local.NewClusterConfigurationService(cfg.Backend)
	}
	if cfg.Events == nil {
		cfg.Events = local.NewEventsService(cfg.Backend)
	}
	// Audit sinks default to no-op (discarding) implementations.
	if cfg.AuditLog == nil {
		cfg.AuditLog = events.NewDiscardAuditLog()
	}
	if cfg.Emitter == nil {
		cfg.Emitter = events.NewDiscardEmitter()
	}
	if cfg.Streamer == nil {
		cfg.Streamer = events.NewDiscardEmitter()
	}
	// Bound the number of concurrent signature-generation requests.
	limiter, err := limiter.NewConnectionsLimiter(limiter.Config{
		MaxConnections: defaults.LimiterMaxConcurrentSignatures,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	closeCtx, cancelFunc := context.WithCancel(context.TODO())
	as := Server{
		bk:              cfg.Backend,
		limiter:         limiter,
		Authority:       cfg.Authority,
		AuthServiceName: cfg.AuthServiceName,
		oidcClients:     make(map[string]*oidcClient),
		samlProviders:   make(map[string]*samlProvider),
		githubClients:   make(map[string]*githubClient),
		caSigningAlg:    cfg.CASigningAlg,
		cancelFunc:      cancelFunc,
		closeCtx:        closeCtx,
		emitter:         cfg.Emitter,
		streamer:        cfg.Streamer,
		Services: Services{
			Trust:                cfg.Trust,
			Presence:             cfg.Presence,
			Provisioner:          cfg.Provisioner,
			Identity:             cfg.Identity,
			Access:               cfg.Access,
			DynamicAccess:        cfg.DynamicAccess,
			ClusterConfiguration: cfg.ClusterConfiguration,
			IAuditLog:            cfg.AuditLog,
			Events:               cfg.Events,
		},
	}
	// Apply functional options, then fill in any remaining defaults.
	for _, o := range opts {
		o(&as)
	}
	if as.clock == nil {
		as.clock = clockwork.NewRealClock()
	}
	return &as, nil
}
// Services is a collection of the backend services used by the auth server,
// embedded so the auth server exposes their methods directly.
type Services struct {
	services.Trust
	services.Presence
	services.Provisioner
	services.Identity
	services.Access
	services.DynamicAccess
	services.ClusterConfiguration
	services.Events
	events.IAuditLog
}
// Prometheus collectors for server key generation; registered in init().
var (
	generateRequestsCount = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: teleport.MetricGenerateRequests,
			Help: "Number of requests to generate new server keys",
		},
	)
	generateThrottledRequestsCount = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: teleport.MetricGenerateRequestsThrottled,
			Help: "Number of throttled requests to generate new server keys",
		},
	)
	generateRequestsCurrent = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: teleport.MetricGenerateRequestsCurrent,
			Help: "Number of current generate requests for server keys",
		},
	)
	generateRequestsLatencies = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name: teleport.MetricGenerateRequestsHistogram,
			Help: "Latency for generate requests for server keys",
			// lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
			// highest bucket start of 0.001 sec * 2^15 == 32.768 sec
			Buckets: prometheus.ExponentialBuckets(0.001, 2, 16),
		},
	)
)
// Server keeps the cluster together. It acts as a certificate authority (CA) for
// a cluster and:
//   - generates the keypair for the node it's running on
//   - invites other SSH nodes to a cluster, by issuing invite tokens
//   - adds other SSH nodes to a cluster, by checking their token and signing their keys
//   - same for users and their sessions
//   - checks public keys to see if they're signed by it (can be trusted or not)
type Server struct {
	// lock guards clock and cache (see GetClock/SetClock, GetCache/SetCache).
	// NOTE(review): the connector maps below are also mutable; confirm their
	// accessors (not visible here) take this lock.
	lock          sync.RWMutex
	oidcClients   map[string]*oidcClient
	samlProviders map[string]*samlProvider
	githubClients map[string]*githubClient
	// clock is the time source; swapped out in tests via SetClock.
	clock clockwork.Clock
	// bk is the storage backend; closed by Close.
	bk       backend.Backend
	closeCtx context.Context
	// cancelFunc cancels closeCtx, stopping background goroutines.
	cancelFunc context.CancelFunc
	sshca.Authority
	// AuthServiceName is a human-readable name of this CA. If several Auth services are running
	// (managing multiple teleport clusters) this field is used to tell them apart in UIs
	// It usually defaults to the hostname of the machine the Auth service runs on.
	AuthServiceName string
	// Services encapsulate services - provisioner, trust, etc
	// used by the auth server in a separate structure
	Services
	// privateKey is used in tests to use pre-generated private keys
	privateKey []byte
	// cipherSuites is a list of ciphersuites that the auth server supports.
	cipherSuites []uint16
	// caSigningAlg is an SSH signing algorithm to use when generating new CAs.
	caSigningAlg *string
	// cache is a fast cache that allows auth server
	// to use cache for most frequent operations,
	// if not set, cache uses itself
	cache Cache
	// limiter bounds concurrent signature-generation requests.
	limiter *limiter.ConnectionsLimiter
	// emitter is events emitter, used to submit discrete events
	emitter events.Emitter
	// streamer is events sessionstreamer, used to create continuous
	// session related streams
	streamer events.Streamer
}
// SetCache sets the cache used by the auth server (guarded by a.lock).
func (a *Server) SetCache(clt Cache) {
	a.lock.Lock()
	defer a.lock.Unlock()
	a.cache = clt
}
// GetCache returns the cache used by the auth server; if no cache has been
// set, the auth server's own Services act as the (uncached) source.
func (a *Server) GetCache() Cache {
	a.lock.RLock()
	defer a.lock.RUnlock()
	if a.cache == nil {
		return &a.Services
	}
	return a.cache
}
// runPeriodicOperations runs periodic bookkeeping operations performed by the
// auth server (currently automatic cert authority rotation checks). It blocks
// until the server's close context is cancelled.
func (a *Server) runPeriodicOperations() {
	// run periodic functions with a semi-random period
	// to avoid contention on the database in case if there are multiple
	// auth servers running - so they don't compete trying
	// to update the same resources.
	r := rand.New(rand.NewSource(a.GetClock().Now().UnixNano()))
	period := defaults.HighResPollingPeriod + time.Duration(r.Intn(int(defaults.HighResPollingPeriod/time.Second)))*time.Second
	log.Debugf("Ticking with period: %v.", period)
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-a.closeCtx.Done():
			return
		case <-ticker.C:
			err := a.autoRotateCertAuthorities()
			if err != nil {
				// Concurrent updates by another auth server are expected and
				// only logged at debug level.
				if trace.IsCompareFailed(err) {
					log.Debugf("Cert authority has been updated concurrently: %v.", err)
				} else {
					log.Errorf("Failed to perform cert rotation check: %v.", err)
				}
			}
		}
	}
}
// Close cancels all background operations and closes the backend, if one is
// set.
func (a *Server) Close() error {
	a.cancelFunc()
	if a.bk != nil {
		return trace.Wrap(a.bk.Close())
	}
	return nil
}
// GetClock returns the clock used by the auth server (guarded by a.lock).
func (a *Server) GetClock() clockwork.Clock {
	a.lock.RLock()
	defer a.lock.RUnlock()
	return a.clock
}
// SetClock sets the clock; used in tests to install a fake clock.
func (a *Server) SetClock(clock clockwork.Clock) {
	a.lock.Lock()
	defer a.lock.Unlock()
	a.clock = clock
}
// SetAuditLog sets the server's audit log.
// NOTE(review): assigns without holding a.lock; presumably only called during
// setup before concurrent use — confirm against callers.
func (a *Server) SetAuditLog(auditLog events.IAuditLog) {
	a.IAuditLog = auditLog
}
// GetClusterConfig gets the ClusterConfig resource via the cache.
func (a *Server) GetClusterConfig(opts ...services.MarshalOption) (services.ClusterConfig, error) {
	return a.GetCache().GetClusterConfig(opts...)
}
// GetClusterName returns the cluster name resource that identifies this
// authority server, fetched via the cache.
func (a *Server) GetClusterName(opts ...services.MarshalOption) (services.ClusterName, error) {
	return a.GetCache().GetClusterName(opts...)
}
// GetDomainName returns the domain name that identifies this authority
// server, also known as the "cluster name" (the string form of the resource
// returned by GetClusterName).
func (a *Server) GetDomainName() (string, error) {
	clusterName, err := a.GetClusterName()
	if err != nil {
		return "", trace.Wrap(err)
	}
	return clusterName.GetClusterName(), nil
}
// LocalCAResponse contains PEM-encoded local CAs.
type LocalCAResponse struct {
	// TLSCA is the PEM-encoded TLS certificate authority.
	TLSCA []byte `json:"tls_ca"`
}
// GetClusterCACert returns the host CA TLS certificate for the local cluster
// as PEM bytes, without signing keys (safe to hand to clients).
func (a *Server) GetClusterCACert() (*LocalCAResponse, error) {
	clusterName, err := a.GetClusterName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Extract the TLS CA for this cluster; false means signing keys
	// are not loaded.
	hostCA, err := a.GetCache().GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: clusterName.GetClusterName(),
	}, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	tlsCA, err := hostCA.TLSCA()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Marshal to PEM bytes to send the CA over the wire.
	pemBytes, err := tlsca.MarshalCertificatePEM(tlsCA.Cert)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &LocalCAResponse{
		TLSCA: pemBytes,
	}, nil
}
// GenerateHostCert uses the private key of the host CA to sign the public key
// of the host (along with metadata like host ID, node name, roles, and ttl)
// and returns the resulting host certificate.
func (a *Server) GenerateHostCert(hostPublicKey []byte, hostID, nodeName string, principals []string, clusterName string, roles teleport.Roles, ttl time.Duration) ([]byte, error) {
	domainName, err := a.GetDomainName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// get the certificate authority that will be signing the public key of the host;
	// true means signing keys are loaded as well
	ca, err := a.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: domainName,
	}, true)
	if err != nil {
		return nil, trace.BadParameter("failed to load host CA for '%s': %v", domainName, err)
	}
	// get the private key of the certificate authority
	caPrivateKey, err := ca.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// create and sign!
	return a.Authority.GenerateHostCert(services.HostCertParams{
		PrivateCASigningKey: caPrivateKey,
		CASigningAlg:        ca.GetSigningAlg(),
		PublicHostKey:       hostPublicKey,
		HostID:              hostID,
		NodeName:            nodeName,
		Principals:          principals,
		ClusterName:         clusterName,
		Roles:               roles,
		TTL:                 ttl,
	})
}
// certs is a pair of SSH and TLS certificates issued for the same user key.
type certs struct {
	// ssh is PEM encoded SSH certificate
	ssh []byte
	// tls is PEM encoded TLS certificate
	tls []byte
}
// certRequest describes a request to issue a new pair of user certificates
// (see generateUserCert).
type certRequest struct {
	// user is a user to generate certificate for
	user services.User
	// checker is used to perform RBAC checks.
	checker services.AccessChecker
	// ttl is Duration of the certificate
	ttl time.Duration
	// publicKey is RSA public key in authorized_keys format
	publicKey []byte
	// compatibility is compatibility mode
	compatibility string
	// overrideRoleTTL is used for requests when the requested TTL should not be
	// adjusted based off the role of the user. This is used by tctl to allow
	// creating long lived user certs.
	overrideRoleTTL bool
	// usage is a list of acceptable usages to be encoded in X509 certificate,
	// is used to limit ways the certificate can be used, for example
	// the cert can be only used against kubernetes endpoint, and not auth endpoint,
	// no usage means unrestricted (to keep backwards compatibility)
	usage []string
	// routeToCluster is an optional teleport cluster name to route the
	// certificate requests to, this teleport cluster name will be used to
	// route the requests to in case of kubernetes
	routeToCluster string
	// kubernetesCluster specifies the target kubernetes cluster for TLS
	// identities. This can be empty on older Teleport clients.
	kubernetesCluster string
	// traits hold claim data used to populate a role at runtime.
	traits wrappers.Traits
	// activeRequests tracks privilege escalation requests applied
	// during the construction of the certificate.
	activeRequests services.RequestIDs
	// appSessionID is the session ID of the application session.
	appSessionID string
	// appPublicAddr is the public address of the application.
	appPublicAddr string
	// appClusterName is the name of the cluster this application is in.
	appClusterName string
}
// GenerateUserTestCerts generates a user SSH/TLS certificate pair; used
// internally for tests. Returns (sshCert, tlsCert, error).
func (a *Server) GenerateUserTestCerts(key []byte, username string, ttl time.Duration, compatibility, routeToCluster string) ([]byte, []byte, error) {
	user, err := a.Identity.GetUser(username, false)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	// Build an access checker from the user's roles and traits for RBAC
	// decisions during issuance.
	checker, err := services.FetchRoles(user.GetRoles(), a.Access, user.GetTraits())
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	certs, err := a.generateUserCert(certRequest{
		user:           user,
		ttl:            ttl,
		compatibility:  compatibility,
		publicKey:      key,
		routeToCluster: routeToCluster,
		checker:        checker,
		traits:         user.GetTraits(),
	})
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	return certs.ssh, certs.tls, nil
}
// GenerateUserAppTestCert generates an application specific certificate, used
// internally for tests. Only the TLS certificate is returned.
func (a *Server) GenerateUserAppTestCert(publicKey []byte, username string, ttl time.Duration, publicAddr string, clusterName string) ([]byte, error) {
	user, err := a.Identity.GetUser(username, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	checker, err := services.FetchRoles(user.GetRoles(), a.Access, user.GetTraits())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	certs, err := a.generateUserCert(certRequest{
		user:      user,
		publicKey: publicKey,
		checker:   checker,
		ttl:       ttl,
		// Set the login to be a random string. Application certificates are never
		// used to log into servers but SSH certificate generation code requires a
		// principal be in the certificate.
		traits: wrappers.Traits(map[string][]string{
			teleport.TraitLogins: []string{uuid.New()},
		}),
		// Only allow this certificate to be used for applications.
		usage: []string{teleport.UsageAppsOnly},
		// Add in the application routing information.
		appSessionID:   uuid.New(),
		appPublicAddr:  publicAddr,
		appClusterName: clusterName,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return certs.tls, nil
}
// generateUserCert generates an SSH and a TLS certificate for the user
// described by req, sharing the same public key. Session TTL and allowed
// logins are derived from the user's roles unless overrideRoleTTL is set.
func (a *Server) generateUserCert(req certRequest) (*certs, error) {
	// reuse the same RSA keys for SSH and TLS keys
	cryptoPubKey, err := sshutils.CryptoPublicKey(req.publicKey)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// extract the passed in certificate format. if nothing was passed in, fetch
	// the certificate format from the role.
	certificateFormat, err := utils.CheckCertificateFormatFlag(req.compatibility)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if certificateFormat == teleport.CertificateFormatUnspecified {
		certificateFormat = req.checker.CertificateFormat()
	}
	var sessionTTL time.Duration
	var allowedLogins []string
	// If the role TTL is ignored, do not restrict session TTL and allowed logins.
	// The only caller setting this parameter should be "tctl auth sign".
	// Otherwise set the session TTL to the smallest of all roles and
	// then only grant access to allowed logins based on that.
	if req.overrideRoleTTL {
		// Take whatever was passed in. Pass in 0 to CheckLoginDuration so all
		// logins are returned for the role set.
		sessionTTL = req.ttl
		allowedLogins, err = req.checker.CheckLoginDuration(0)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	} else {
		// Adjust session TTL to the smaller of two values: the session TTL
		// requested in tsh or the session TTL for the role.
		sessionTTL = req.checker.AdjustSessionTTL(req.ttl)
		// Return a list of logins that meet the session TTL limit. This means if
		// the requested session TTL is larger than the max session TTL for a login,
		// that login will not be included in the list of allowed logins.
		allowedLogins, err = req.checker.CheckLoginDuration(sessionTTL)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}
	clusterName, err := a.GetDomainName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Load the user CA with signing keys (true) to sign the certificates.
	ca, err := a.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.UserCA,
		DomainName: clusterName,
	}, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	privateKey, err := ca.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	sshCert, err := a.Authority.GenerateUserCert(services.UserCertParams{
		PrivateCASigningKey:   privateKey,
		CASigningAlg:          ca.GetSigningAlg(),
		PublicUserKey:         req.publicKey,
		Username:              req.user.GetName(),
		AllowedLogins:         allowedLogins,
		TTL:                   sessionTTL,
		Roles:                 req.checker.RoleNames(),
		CertificateFormat:     certificateFormat,
		PermitPortForwarding:  req.checker.CanPortForward(),
		PermitAgentForwarding: req.checker.CanForwardAgents(),
		PermitX11Forwarding:   req.checker.PermitX11Forwarding(),
		RouteToCluster:        req.routeToCluster,
		Traits:                req.traits,
		ActiveRequests:        req.activeRequests,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	kubeGroups, kubeUsers, err := req.checker.CheckKubeGroupsAndUsers(sessionTTL, req.overrideRoleTTL)
	// NotFound errors are acceptable - this user may have no k8s access
	// granted and that shouldn't prevent us from issuing a TLS cert.
	if err != nil && !trace.IsNotFound(err) {
		return nil, trace.Wrap(err)
	}
	// Only validate/default kubernetes cluster name for the current teleport
	// cluster. If this cert is targeting a trusted teleport cluster, leave all
	// the kubernetes cluster validation up to them.
	if req.routeToCluster == "" || req.routeToCluster == clusterName {
		req.kubernetesCluster, err = kubeutils.CheckOrSetKubeCluster(a.closeCtx, a.Presence, req.kubernetesCluster, clusterName)
		if err != nil {
			if !trace.IsNotFound(err) {
				return nil, trace.Wrap(err)
			}
			// Missing kube clusters are fine in clusters without k8s
			// integration; the extension is simply left empty.
			log.WithError(err).Debug("Failed setting default kubernetes cluster for user login (user did not provide a cluster); leaving KubernetesCluster extension in the TLS certificate empty")
		}
	}
	// generate TLS certificate
	tlsAuthority, err := ca.TLSCA()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	identity := tlsca.Identity{
		Username:          req.user.GetName(),
		Groups:            req.checker.RoleNames(),
		Principals:        allowedLogins,
		Usage:             req.usage,
		RouteToCluster:    req.routeToCluster,
		KubernetesCluster: req.kubernetesCluster,
		Traits:            req.traits,
		KubernetesGroups:  kubeGroups,
		KubernetesUsers:   kubeUsers,
		RouteToApp: tlsca.RouteToApp{
			SessionID:   req.appSessionID,
			PublicAddr:  req.appPublicAddr,
			ClusterName: req.appClusterName,
		},
		TeleportCluster: clusterName,
	}
	subject, err := identity.Subject()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	certRequest := tlsca.CertificateRequest{
		Clock:     a.clock,
		PublicKey: cryptoPubKey,
		Subject:   subject,
		NotAfter:  a.clock.Now().UTC().Add(sessionTTL),
	}
	tlsCert, err := tlsAuthority.GenerateCertificate(certRequest)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &certs{ssh: sshCert, tls: tlsCert}, nil
}
// WithUserLock executes function authenticateFn that performs user authentication.
// If authenticateFn returns a non-nil error, the login attempt will be logged as
// failed. The only exception to this rule is ConnectionProblemError: in that case
// access will be denied, but the login attempt will not be recorded — this is
// done to avoid potential user lockouts due to backend failures.
// In case the user exceeds defaults.MaxLoginAttempts,
// the user account will be locked for defaults.AccountLockInterval.
func (a *Server) WithUserLock(username string, authenticateFn func() error) error {
	user, err := a.Identity.GetUser(username, false)
	if err != nil {
		if trace.IsNotFound(err) {
			// If user is not found, still call authenticateFn. It should
			// always return an error. This prevents username oracles and
			// timing attacks.
			return authenticateFn()
		}
		return trace.Wrap(err)
	}
	status := user.GetStatus()
	if status.IsLocked && status.LockExpires.After(a.clock.Now().UTC()) {
		return trace.AccessDenied("%v exceeds %v failed login attempts, locked until %v",
			user.GetName(), defaults.MaxLoginAttempts, utils.HumanTimeFormat(status.LockExpires))
	}
	fnErr := authenticateFn()
	if fnErr == nil {
		// Upon successful login, reset the failed attempt counter. A NotFound
		// error simply means there were no recorded attempts to delete.
		err = a.DeleteUserLoginAttempts(username)
		if err != nil && !trace.IsNotFound(err) {
			return trace.Wrap(err)
		}
		return nil
	}
	// Do not lock the user in case the DB is flaky or down.
	// BUGFIX: this previously inspected `err` (the nil GetUser result) instead
	// of the authentication error, so connection problems were still recorded
	// as failed login attempts.
	if trace.IsConnectionProblem(fnErr) {
		return trace.Wrap(fnErr)
	}
	// log failed attempt and possibly lock user
	attempt := services.LoginAttempt{Time: a.clock.Now().UTC(), Success: false}
	err = a.AddUserLoginAttempt(username, attempt, defaults.AttemptTTL)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	loginAttempts, err := a.Identity.GetUserLoginAttempts(username)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	if !services.LastFailed(defaults.MaxLoginAttempts, loginAttempts) {
		log.Debugf("%v user has less than %v failed login attempts", username, defaults.MaxLoginAttempts)
		return trace.Wrap(fnErr)
	}
	lockUntil := a.clock.Now().UTC().Add(defaults.AccountLockInterval)
	// BUGFIX: the message previously formatted status.LockExpires (the stale
	// expiry of any previous lock) instead of the newly computed lockUntil.
	message := fmt.Sprintf("%v exceeds %v failed login attempts, locked until %v",
		username, defaults.MaxLoginAttempts, utils.HumanTimeFormat(lockUntil))
	log.Debug(message)
	user.SetLocked(lockUntil, "user has exceeded maximum failed login attempts")
	err = a.Identity.UpsertUser(user)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	return trace.AccessDenied(message)
}
// PreAuthenticatedSignIn is for 2-way authentication methods like U2F where the
// password has already been checked before issuing the second factor challenge.
// It creates and stores a new web session for the user and returns a copy of it
// with secrets stripped.
func (a *Server) PreAuthenticatedSignIn(user string, identity tlsca.Identity) (services.WebSession, error) {
	roles, traits, err := services.ExtractFromIdentity(a, identity)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	session, err := a.NewWebSession(user, roles, traits)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err := a.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// U2FSignRequest verifies the user's password (subject to account locking) and,
// if U2F is configured, issues a new U2F sign challenge for the user.
func (a *Server) U2FSignRequest(user string, password []byte) (*u2f.SignRequest, error) {
	authPref, err := a.GetAuthPreference()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	u2fSettings, err := authPref.GetU2F()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Check the password first; failed attempts count towards locking.
	if err := a.WithUserLock(user, func() error {
		return a.CheckPasswordWOToken(user, password)
	}); err != nil {
		return nil, trace.Wrap(err)
	}
	reg, err := a.GetU2FRegistration(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	challenge, err := u2f.NewChallenge(u2fSettings.AppID, u2fSettings.Facets)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err := a.UpsertU2FSignChallenge(user, challenge); err != nil {
		return nil, trace.Wrap(err)
	}
	return challenge.SignRequest(*reg), nil
}
// CheckU2FSignResponse validates a U2F sign response against the user's stored
// registration, counter, and outstanding challenge, and persists the updated
// counter on success.
func (a *Server) CheckU2FSignResponse(user string, response *u2f.SignResponse) error {
	// Before trying to verify, confirm U2F is actually set up on the backend.
	authPref, err := a.GetAuthPreference()
	if err != nil {
		return trace.Wrap(err)
	}
	if _, err := authPref.GetU2F(); err != nil {
		return trace.Wrap(err)
	}
	reg, err := a.GetU2FRegistration(user)
	if err != nil {
		return trace.Wrap(err)
	}
	counter, err := a.GetU2FRegistrationCounter(user)
	if err != nil {
		return trace.Wrap(err)
	}
	challenge, err := a.GetU2FSignChallenge(user)
	if err != nil {
		return trace.Wrap(err)
	}
	newCounter, err := reg.Authenticate(*response, *challenge, counter)
	if err != nil {
		return trace.Wrap(err)
	}
	return trace.Wrap(a.UpsertU2FRegistrationCounter(user, newCounter))
}
// ExtendWebSession creates a new web session for a user based on a valid previous sessionID.
// Additional roles are appended to initial roles if there is an approved access request.
func (a *Server) ExtendWebSession(user, prevSessionID, accessRequestID string, identity tlsca.Identity) (services.WebSession, error) {
	prevSession, err := a.GetWebSession(user, prevSessionID)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// An absolute expiry time may have been set for this session by an
	// external identity service, in which case it cannot be renewed past
	// that point without extra renewal logic with the external OIDC provider.
	expiresAt := prevSession.GetExpiryTime()
	if !expiresAt.IsZero() && expiresAt.Before(a.clock.Now().UTC()) {
		return nil, trace.NotFound("web session has expired")
	}
	roles, traits, err := services.ExtractFromIdentity(a, identity)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if accessRequestID != "" {
		requestedRoles, requestExpiry, err := a.getRolesAndExpiryFromAccessRequest(user, accessRequestID)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		roles = utils.Deduplicate(append(roles, requestedRoles...))
		// Let the session expire together with the access request.
		expiresAt = requestExpiry
	}
	session, err := a.NewWebSession(user, roles, traits)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	session.SetExpiryTime(expiresAt)
	bearerTTL := utils.MinTTL(utils.ToTTL(a.clock, expiresAt), BearerTokenTTL)
	session.SetBearerTokenExpiryTime(a.clock.Now().UTC().Add(bearerTTL))
	if err := a.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	session, err = services.GetWebSessionMarshaler().ExtendWebSession(session)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// getRolesAndExpiryFromAccessRequest looks up the given access request for the
// user and, if it is approved, valid, and unexpired, returns its roles and
// access expiry time.
func (a *Server) getRolesAndExpiryFromAccessRequest(user, accessRequestID string) ([]string, time.Time, error) {
	filter := services.AccessRequestFilter{
		User: user,
		ID:   accessRequestID,
	}
	reqs, err := a.GetAccessRequests(context.TODO(), filter)
	if err != nil {
		return nil, time.Time{}, trace.Wrap(err)
	}
	if len(reqs) < 1 {
		return nil, time.Time{}, trace.NotFound("access request %q not found", accessRequestID)
	}
	req := reqs[0]
	switch state := req.GetState(); {
	case state.IsApproved():
		// fall through to validation below
	case state.IsDenied():
		return nil, time.Time{}, trace.AccessDenied("access request %q has been denied", accessRequestID)
	default:
		return nil, time.Time{}, trace.BadParameter("access request %q is awaiting approval", accessRequestID)
	}
	if err := services.ValidateAccessRequest(a, req); err != nil {
		return nil, time.Time{}, trace.Wrap(err)
	}
	accessExpiry := req.GetAccessExpiry()
	if accessExpiry.Before(a.GetClock().Now()) {
		return nil, time.Time{}, trace.BadParameter("access request %q has expired", accessRequestID)
	}
	return req.GetRoles(), accessExpiry, nil
}
// CreateWebSession creates a new web session for a user without any
// checks; it is used by admins.
func (a *Server) CreateWebSession(user string) (services.WebSession, error) {
	usr, err := a.GetUser(user, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	session, err := a.NewWebSession(user, usr.GetRoles(), usr.GetTraits())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err := a.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	session, err = services.GetWebSessionMarshaler().GenerateWebSession(session)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// GenerateTokenRequest is a request to generate auth token.
type GenerateTokenRequest struct {
	// Token, if provided, sets the token value; otherwise it will be
	// auto-generated (see CheckAndSetDefaults).
	Token string `json:"token"`
	// Roles is a list of roles this token authenticates as.
	Roles teleport.Roles `json:"roles"`
	// TTL is a time to live for the token; zero means the default
	// provisioning token TTL is applied (see CheckAndSetDefaults).
	TTL time.Duration `json:"ttl"`
	// Labels sets token labels, e.g. {env: prod, region: us-west}.
	// Labels are later passed to resources that are joining,
	// e.g. remote clusters and in future versions, nodes and proxies.
	Labels map[string]string `json:"labels"`
}
// CheckAndSetDefaults validates the requested roles and fills in defaults:
// a zero TTL becomes defaults.ProvisioningTokenTTL and an empty Token is
// replaced with a cryptographically random value.
func (req *GenerateTokenRequest) CheckAndSetDefaults() error {
	for _, role := range req.Roles {
		if err := role.Check(); err != nil {
			return trace.Wrap(err)
		}
	}
	if req.TTL == 0 {
		req.TTL = defaults.ProvisioningTokenTTL
	}
	if req.Token != "" {
		return nil
	}
	randomToken, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return trace.Wrap(err)
	}
	req.Token = randomToken
	return nil
}
// GenerateToken generates a multi-purpose authentication token, stores it in
// the provisioner backend, and emits an audit event when the token grants the
// trusted-cluster role. It returns the (possibly auto-generated) token value.
func (a *Server) GenerateToken(ctx context.Context, req GenerateTokenRequest) (string, error) {
	if err := req.CheckAndSetDefaults(); err != nil {
		return "", trace.Wrap(err)
	}
	provisionToken, err := services.NewProvisionToken(req.Token, req.Roles, a.clock.Now().UTC().Add(req.TTL))
	if err != nil {
		return "", trace.Wrap(err)
	}
	if len(req.Labels) != 0 {
		meta := provisionToken.GetMetadata()
		meta.Labels = req.Labels
		provisionToken.SetMetadata(meta)
	}
	if err := a.Provisioner.UpsertToken(provisionToken); err != nil {
		return "", trace.Wrap(err)
	}
	userName := clientUsername(ctx)
	for _, role := range req.Roles {
		if role != teleport.RoleTrustedCluster {
			continue
		}
		err := a.emitter.EmitAuditEvent(ctx, &events.TrustedClusterTokenCreate{
			Metadata: events.Metadata{
				Type: events.TrustedClusterTokenCreateEvent,
				Code: events.TrustedClusterTokenCreateCode,
			},
			UserMetadata: events.UserMetadata{
				User: userName,
			},
		})
		if err != nil {
			log.WithError(err).Warn("Failed to emit trusted cluster token create event.")
		}
	}
	return req.Token, nil
}
// ExtractHostID returns the host ID portion of a hostname of the form
// "<host-id>.<cluster-name>"; it errors if the cluster-name suffix is missing.
func ExtractHostID(hostName string, clusterName string) (string, error) {
	suffix := "." + clusterName
	hostID := strings.TrimSuffix(hostName, suffix)
	if hostID == hostName {
		// Nothing was trimmed, so the expected suffix was absent.
		return "", trace.BadParameter("expected suffix %q in %q", suffix, hostName)
	}
	return hostID, nil
}
// HostFQDN consists of the host UUID and the cluster name joined via ".".
func HostFQDN(hostUUID, clusterName string) string {
	return fmt.Sprintf("%s.%s", hostUUID, clusterName)
}
// GenerateServerKeysRequest is a request to generate server keys.
type GenerateServerKeysRequest struct {
	// HostID is a unique ID of the host.
	HostID string `json:"host_id"`
	// NodeName is a user friendly host name.
	NodeName string `json:"node_name"`
	// Roles is a list of roles assigned to the node; exactly one role is
	// expected (see CheckAndSetDefaults).
	Roles teleport.Roles `json:"roles"`
	// AdditionalPrincipals is a list of additional principals
	// to include in OpenSSH and X509 certificates.
	AdditionalPrincipals []string `json:"additional_principals"`
	// DNSNames is a list of DNS names
	// to include in the x509 client certificate.
	DNSNames []string `json:"dns_names"`
	// PublicTLSKey is a PEM encoded public key
	// used for TLS setup.
	PublicTLSKey []byte `json:"public_tls_key"`
	// PublicSSHKey is a SSH encoded public key;
	// if present it will be signed as a return value,
	// otherwise a new public/private key pair will be generated.
	PublicSSHKey []byte `json:"public_ssh_key"`
	// RemoteAddr is the IP address of the remote host requesting a host
	// certificate. RemoteAddr is used to replace 0.0.0.0 in the list of
	// additional principals.
	RemoteAddr string `json:"remote_addr"`
	// Rotation allows clients to send the certificate authority rotation state
	// expected by the client of the certificate authority backends, so auth
	// servers can avoid the situation when clients request certs assuming one
	// state, and auth servers issue another.
	Rotation *services.Rotation `json:"rotation,omitempty"`
	// NoCache is an argument that only local callers can supply to bypass cache;
	// it is intentionally excluded from JSON serialization.
	NoCache bool `json:"-"`
}
// CheckAndSetDefaults verifies the request: HostID must be set and exactly
// one system role must be requested.
func (req *GenerateServerKeysRequest) CheckAndSetDefaults() error {
	switch {
	case req.HostID == "":
		return trace.BadParameter("missing parameter HostID")
	case len(req.Roles) != 1:
		return trace.BadParameter("expected only one system role, got %v", len(req.Roles))
	}
	return nil
}
// GenerateServerKeys generates new host private keys and certificates (signed
// by the host certificate authority) for a node.
//
// The call is rate limited per requested role set. When the caller supplies
// public keys, those are signed; otherwise a fresh key pair is generated.
// Returns the packed private key, SSH host certificate, TLS certificate, and
// the CA certs/keys needed to validate peers.
func (a *Server) GenerateServerKeys(req GenerateServerKeysRequest) (*PackedKeys, error) {
	if err := req.CheckAndSetDefaults(); err != nil {
		return nil, trace.Wrap(err)
	}
	// Rate-limit by role string; throttled requests are counted separately
	// and are not included in latency observations below.
	if err := a.limiter.AcquireConnection(req.Roles.String()); err != nil {
		generateThrottledRequestsCount.Inc()
		log.Debugf("Node %q [%v] is rate limited: %v.", req.NodeName, req.HostID, req.Roles)
		return nil, trace.Wrap(err)
	}
	defer a.limiter.ReleaseConnection(req.Roles.String())

	// only observe latencies for non-throttled requests
	start := a.clock.Now()
	defer generateRequestsLatencies.Observe(time.Since(start).Seconds())

	generateRequestsCount.Inc()
	generateRequestsCurrent.Inc()
	defer generateRequestsCurrent.Dec()

	clusterName, err := a.GetClusterName()
	if err != nil {
		return nil, trace.Wrap(err)
	}

	// If the request contains 0.0.0.0, this implies an advertise IP was not
	// specified on the node. Try and guess what the address is by replacing
	// 0.0.0.0 with the RemoteAddr as known to the Auth Server.
	if utils.SliceContainsStr(req.AdditionalPrincipals, defaults.AnyAddress) {
		remoteHost, err := utils.Host(req.RemoteAddr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		req.AdditionalPrincipals = utils.ReplaceInSlice(
			req.AdditionalPrincipals,
			defaults.AnyAddress,
			remoteHost)
	}

	var cryptoPubKey crypto.PublicKey
	var privateKeyPEM, pubSSHKey []byte
	// NOTE(review): if only one of PublicSSHKey/PublicTLSKey is set, this
	// branch still attempts to parse both and will fail — callers appear
	// expected to supply both or neither; confirm with call sites.
	if req.PublicSSHKey != nil || req.PublicTLSKey != nil {
		_, _, _, _, err := ssh.ParseAuthorizedKey(req.PublicSSHKey)
		if err != nil {
			return nil, trace.BadParameter("failed to parse SSH public key")
		}
		pubSSHKey = req.PublicSSHKey
		cryptoPubKey, err = tlsca.ParsePublicKeyPEM(req.PublicTLSKey)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	} else {
		// generate private key
		privateKeyPEM, pubSSHKey, err = a.GenerateKeyPair("")
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// reuse the same RSA keys for SSH and TLS keys
		cryptoPubKey, err = sshutils.CryptoPublicKey(pubSSHKey)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}

	// get the certificate authority that will be signing the public key of the host,
	// normally from the cache unless the caller explicitly bypasses it
	client := a.GetCache()
	if req.NoCache {
		client = &a.Services
	}
	ca, err := client.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: clusterName.GetClusterName(),
	}, true)
	if err != nil {
		return nil, trace.BadParameter("failed to load host CA for %q: %v", clusterName.GetClusterName(), err)
	}

	// could be a couple of scenarios, either client data is out of sync,
	// or auth server is out of sync, either way, for now check that
	// cache is out of sync, this will result in higher read rate
	// to the backend, which is a fine tradeoff
	if !req.NoCache && req.Rotation != nil && !req.Rotation.Matches(ca.GetRotation()) {
		log.Debugf("Client sent rotation state %v, cache state is %v, using state from the DB.", req.Rotation, ca.GetRotation())
		ca, err = a.GetCertAuthority(services.CertAuthID{
			Type:       services.HostCA,
			DomainName: clusterName.GetClusterName(),
		}, true)
		if err != nil {
			return nil, trace.BadParameter("failed to load host CA for %q: %v", clusterName.GetClusterName(), err)
		}
		if !req.Rotation.Matches(ca.GetRotation()) {
			return nil, trace.BadParameter("the client expected state is out of sync, server rotation state: %v, client rotation state: %v, re-register the client from scratch to fix the issue.", ca.GetRotation(), req.Rotation)
		}
	}

	tlsAuthority, err := ca.TLSCA()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// get the private key of the certificate authority
	caPrivateKey, err := ca.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// generate hostSSH certificate
	hostSSHCert, err := a.Authority.GenerateHostCert(services.HostCertParams{
		PrivateCASigningKey: caPrivateKey,
		CASigningAlg:        ca.GetSigningAlg(),
		PublicHostKey:       pubSSHKey,
		HostID:              req.HostID,
		NodeName:            req.NodeName,
		ClusterName:         clusterName.GetClusterName(),
		Roles:               req.Roles,
		Principals:          req.AdditionalPrincipals,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// generate host TLS certificate
	identity := tlsca.Identity{
		Username:        HostFQDN(req.HostID, clusterName.GetClusterName()),
		Groups:          req.Roles.StringSlice(),
		TeleportCluster: clusterName.GetClusterName(),
	}
	subject, err := identity.Subject()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	certRequest := tlsca.CertificateRequest{
		Clock:     a.clock,
		PublicKey: cryptoPubKey,
		Subject:   subject,
		NotAfter:  a.clock.Now().UTC().Add(defaults.CATTL),
		DNSNames:  append([]string{}, req.AdditionalPrincipals...),
	}
	// HTTPS requests need to specify a DNS name that should be present in the
	// certificate as one of the DNS Names. It is not known in advance,
	// that is why there is a default one for all certificates.
	if req.Roles.Include(teleport.RoleAuth) || req.Roles.Include(teleport.RoleAdmin) || req.Roles.Include(teleport.RoleApp) {
		certRequest.DNSNames = append(certRequest.DNSNames, "*."+teleport.APIDomain, teleport.APIDomain)
	}
	// Unlike additional principals, DNS Names is x509 specific and is limited
	// to services with TLS endpoints (e.g. auth, proxies, kubernetes).
	if req.Roles.Include(teleport.RoleAuth) || req.Roles.Include(teleport.RoleAdmin) || req.Roles.Include(teleport.RoleProxy) || req.Roles.Include(teleport.RoleKube) {
		certRequest.DNSNames = append(certRequest.DNSNames, req.DNSNames...)
	}
	hostTLSCert, err := tlsAuthority.GenerateCertificate(certRequest)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &PackedKeys{
		Key:        privateKeyPEM,
		Cert:       hostSSHCert,
		TLSCert:    hostTLSCert,
		TLSCACerts: services.TLSCerts(ca),
		SSHCACerts: ca.GetCheckingKeys(),
	}, nil
}
// ValidateToken takes a provisioning token value and finds if it's valid. Returns
// a list of roles this token allows its owner to assume and token labels, or an
// error if the token cannot be found.
func (a *Server) ValidateToken(token string) (teleport.Roles, map[string]string, error) {
	staticTokens, err := a.GetCache().GetStaticTokens()
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	// Static tokens never expire; if the supplied value matches one, return
	// its roles right away. Constant-time comparison avoids leaking token
	// contents through timing.
	for _, st := range staticTokens.GetStaticTokens() {
		if subtle.ConstantTimeCompare([]byte(st.GetName()), []byte(token)) == 1 {
			return st.GetRoles(), nil, nil
		}
	}
	// Otherwise look for an ephemeral token in the backend and make sure it
	// has not expired yet.
	provisionToken, err := a.GetCache().GetToken(token)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	if !a.checkTokenTTL(provisionToken) {
		return nil, nil, trace.AccessDenied("token expired")
	}
	return provisionToken.GetRoles(), provisionToken.GetMetadata().Labels, nil
}
// checkTokenTTL reports whether the token is still valid. An expired token is
// removed from the backend as a side effect (best-effort) and false is returned.
func (a *Server) checkTokenTTL(tok services.ProvisionToken) bool {
	if !tok.Expiry().Before(a.clock.Now().UTC()) {
		return true
	}
	// Token has expired: clean it up; a NotFound just means someone else
	// already deleted it.
	if err := a.DeleteToken(tok.GetName()); err != nil && !trace.IsNotFound(err) {
		log.Warnf("Unable to delete token from backend: %v.", err)
	}
	return false
}
// RegisterUsingTokenRequest is a request to register with
// the auth server using an authentication token.
type RegisterUsingTokenRequest struct {
	// HostID is a unique host ID, usually a UUID.
	// NOTE(review): the json tag "hostID" differs from the snake_case used by
	// the other fields — confirm before normalizing, wire compatibility depends on it.
	HostID string `json:"hostID"`
	// NodeName is a node name.
	NodeName string `json:"node_name"`
	// Role is a system role, e.g. Proxy.
	Role teleport.Role `json:"role"`
	// Token is an authentication token.
	Token string `json:"token"`
	// AdditionalPrincipals is a list of additional principals.
	AdditionalPrincipals []string `json:"additional_principals"`
	// DNSNames is a list of DNS names to include in the x509 client certificate.
	DNSNames []string `json:"dns_names"`
	// PublicTLSKey is a PEM encoded public key
	// used for TLS setup.
	PublicTLSKey []byte `json:"public_tls_key"`
	// PublicSSHKey is a SSH encoded public key;
	// if present it will be signed as a return value,
	// otherwise a new public/private key pair will be generated.
	PublicSSHKey []byte `json:"public_ssh_key"`
	// RemoteAddr is the remote address of the host requesting a host certificate.
	// It is used to replace 0.0.0.0 in the list of additional principals.
	RemoteAddr string `json:"remote_addr"`
}
// CheckAndSetDefaults validates the request: HostID and Token are mandatory
// and the requested system role must be a known one.
func (r *RegisterUsingTokenRequest) CheckAndSetDefaults() error {
	switch {
	case r.HostID == "":
		return trace.BadParameter("missing parameter HostID")
	case r.Token == "":
		return trace.BadParameter("missing parameter Token")
	}
	return trace.Wrap(r.Role.Check())
}
// RegisterUsingToken adds a new node to the Teleport cluster using a previously issued token.
// A node must also request a specific role (and the role must match one of the roles
// the token was generated for).
//
// If a token was generated with a TTL, it gets enforced (can't register new nodes after TTL expires).
// If a token was generated with a TTL=0, it means it's a single-use token and it gets destroyed
// after a successful registration.
func (a *Server) RegisterUsingToken(req RegisterUsingTokenRequest) (*PackedKeys, error) {
	log.Infof("Node %q [%v] is trying to join with role: %v.", req.NodeName, req.HostID, req.Role)
	if err := req.CheckAndSetDefaults(); err != nil {
		return nil, trace.Wrap(err)
	}
	// Make sure the token is valid; the token error is logged but a generic
	// message is returned to the caller.
	allowedRoles, _, err := a.ValidateToken(req.Token)
	if err != nil {
		log.Warningf("%q [%v] can not join the cluster with role %s, token error: %v", req.NodeName, req.HostID, req.Role, err)
		return nil, trace.AccessDenied(fmt.Sprintf("%q [%v] can not join the cluster with role %s, the token is not valid", req.NodeName, req.HostID, req.Role))
	}
	// Make sure the caller requested a role the token actually allows.
	if !allowedRoles.Include(req.Role) {
		msg := fmt.Sprintf("node %q [%v] can not join the cluster, the token does not allow %q role", req.NodeName, req.HostID, req.Role)
		log.Warn(msg)
		return nil, trace.BadParameter(msg)
	}
	// Generate and return host certificate and keys.
	packedKeys, err := a.GenerateServerKeys(GenerateServerKeysRequest{
		HostID:               req.HostID,
		NodeName:             req.NodeName,
		Roles:                teleport.Roles{req.Role},
		AdditionalPrincipals: req.AdditionalPrincipals,
		PublicTLSKey:         req.PublicTLSKey,
		PublicSSHKey:         req.PublicSSHKey,
		RemoteAddr:           req.RemoteAddr,
		DNSNames:             req.DNSNames,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	log.Infof("Node %q [%v] has joined the cluster.", req.NodeName, req.HostID)
	return packedKeys, nil
}
// RegisterNewAuthServer consumes a provisioning token that grants the Auth
// role: the token is validated, required to include teleport.RoleAuth, and
// then deleted (single use).
func (a *Server) RegisterNewAuthServer(token string) error {
	provisionToken, err := a.Provisioner.GetToken(token)
	if err != nil {
		return trace.Wrap(err)
	}
	if !provisionToken.GetRoles().Include(teleport.RoleAuth) {
		return trace.AccessDenied("role does not match")
	}
	return trace.Wrap(a.DeleteToken(token))
}
// DeleteToken removes the given token from whichever store holds it: reset
// password tokens first, then node provisioning tokens. Statically configured
// tokens cannot be removed at runtime.
func (a *Server) DeleteToken(token string) (err error) {
	staticTokens, err := a.GetStaticTokens()
	if err != nil {
		return trace.Wrap(err)
	}
	// Refuse to delete statically configured tokens.
	for _, st := range staticTokens.GetStaticTokens() {
		if subtle.ConstantTimeCompare([]byte(st.GetName()), []byte(token)) == 1 {
			return trace.BadParameter("token %s is statically configured and cannot be removed", token)
		}
	}
	// Try the reset-password token store first.
	if err = a.Identity.DeleteResetPasswordToken(context.TODO(), token); err == nil {
		return nil
	}
	// Fall back to node provisioning tokens; the last error is returned when
	// neither store knows about the token.
	if err = a.Provisioner.DeleteToken(token); err == nil {
		return nil
	}
	return trace.Wrap(err)
}
// GetTokens returns all tokens (machine provisioning ones and user invitation tokens). Machine
// tokens usually have "node roles", like auth,proxy,node and user invitation tokens have 'signup' role.
func (a *Server) GetTokens(opts ...services.MarshalOption) (tokens []services.ProvisionToken, err error) {
	// Node provisioning tokens.
	tokens, err = a.Provisioner.GetTokens()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Static tokens (absence is not an error).
	staticTokens, err := a.GetStaticTokens()
	if err != nil && !trace.IsNotFound(err) {
		return nil, trace.Wrap(err)
	}
	if err == nil {
		tokens = append(tokens, staticTokens.GetStaticTokens()...)
	}
	// Reset-password tokens, represented as provisioning tokens carrying the
	// signup role.
	resetPasswordTokens, err := a.Identity.GetResetPasswordTokens(context.TODO())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	for _, resetToken := range resetPasswordTokens {
		signupRoles := teleport.Roles{teleport.RoleSignup}
		converted, err := services.NewProvisionToken(resetToken.GetName(), signupRoles, resetToken.Expiry())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		tokens = append(tokens, converted)
	}
	return tokens, nil
}
// NewWebSession mints a fresh web session for the user: it fetches the user,
// builds an access checker from the given roles/traits, issues SSH/TLS user
// certificates from a pooled key pair, and returns the assembled session with
// random session and bearer tokens.
func (a *Server) NewWebSession(username string, roles []string, traits wrappers.Traits) (services.WebSession, error) {
	user, err := a.GetUser(username, false)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	accessChecker, err := services.FetchRoles(roles, a.Access, traits)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	privKey, pubKey, err := a.GetNewKeyPairFromPool()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	sessionTTL := accessChecker.AdjustSessionTTL(defaults.CertDuration)
	certs, err := a.generateUserCert(certRequest{
		user:      user,
		ttl:       sessionTTL,
		publicKey: pubKey,
		checker:   accessChecker,
		traits:    traits,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	sessionToken, err := utils.CryptoRandomHex(SessionTokenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	bearerToken, err := utils.CryptoRandomHex(SessionTokenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	bearerTokenTTL := utils.MinTTL(sessionTTL, BearerTokenTTL)
	return services.NewWebSession(sessionToken, services.KindWebSession, services.KindWebSession, services.WebSessionSpecV2{
		User:               user.GetName(),
		Priv:               privKey,
		Pub:                certs.ssh,
		TLSCert:            certs.tls,
		Expires:            a.clock.Now().UTC().Add(sessionTTL),
		BearerToken:        bearerToken,
		BearerTokenExpires: a.clock.Now().UTC().Add(bearerTokenTTL),
	}), nil
}
// UpsertWebSession creates or updates the given web session for the user,
// keyed by the session's own name.
func (a *Server) UpsertWebSession(user string, session services.WebSession) error {
	return a.Identity.UpsertWebSession(user, session.GetName(), session)
}
// GetWebSession returns the web session with the given id for the user,
// including secrets.
func (a *Server) GetWebSession(userName string, id string) (services.WebSession, error) {
	return a.Identity.GetWebSession(userName, id)
}
// GetWebSessionInfo returns the web session with the given id for the user,
// with secrets stripped — safe to hand to clients.
func (a *Server) GetWebSessionInfo(userName string, id string) (services.WebSession, error) {
	session, err := a.Identity.GetWebSession(userName, id)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// DeleteNamespace removes a namespace; the default namespace and namespaces
// that still have registered nodes are refused.
func (a *Server) DeleteNamespace(namespace string) error {
	if namespace == defaults.Namespace {
		return trace.AccessDenied("can't delete default namespace")
	}
	registeredNodes, err := a.Presence.GetNodes(namespace, services.SkipValidation())
	if err != nil {
		return trace.Wrap(err)
	}
	if len(registeredNodes) != 0 {
		return trace.BadParameter("can't delete namespace %v that has %v registered nodes", namespace, len(registeredNodes))
	}
	return a.Presence.DeleteNamespace(namespace)
}
// DeleteWebSession removes the web session with the given id for the user.
func (a *Server) DeleteWebSession(user string, id string) error {
	return trace.Wrap(a.Identity.DeleteWebSession(user, id))
}
// NewWatcher returns a new event watcher. For an auth server this watcher
// reports events as seen by the auth server's in-memory cache, not the backend.
func (a *Server) NewWatcher(ctx context.Context, watch services.Watch) (services.Watcher, error) {
	return a.GetCache().NewWatcher(ctx, watch)
}
// DeleteRole deletes a role by name. Deletion is refused while the role is
// still referenced by any local user or by a user cert authority (e.g. one
// belonging to a trusted cluster); on success a RoleDelete audit event is
// emitted (best-effort).
func (a *Server) DeleteRole(ctx context.Context, name string) error {
	// check if this role is used by CA or Users
	users, err := a.Identity.GetUsers(false)
	if err != nil {
		return trace.Wrap(err)
	}
	for _, u := range users {
		for _, r := range u.GetRoles() {
			if r == name {
				// Mask the actual error here as it could be used to enumerate users
				// within the system.
				log.Warnf("Failed to delete role: role %v is used by user %v.", name, u.GetName())
				return trace.BadParameter("failed to delete role that still in use by a user. Check system server logs for more details.")
			}
		}
	}
	// check if it's used by some external cert authorities, e.g.
	// cert authorities related to external cluster
	cas, err := a.Trust.GetCertAuthorities(services.UserCA, false)
	if err != nil {
		return trace.Wrap(err)
	}
	// NOTE: the loop variable was previously named `a`, shadowing the
	// *Server receiver; renamed to `ca` to avoid accidental misuse.
	for _, ca := range cas {
		for _, r := range ca.GetRoles() {
			if r == name {
				// Mask the actual error here as it could be used to enumerate
				// cert authorities within the system.
				log.Warnf("Failed to delete role: role %v is used by user cert authority %v", name, ca.GetClusterName())
				return trace.BadParameter("failed to delete role that still in use by a user. Check system server logs for more details.")
			}
		}
	}
	if err := a.Access.DeleteRole(ctx, name); err != nil {
		return trace.Wrap(err)
	}
	err = a.emitter.EmitAuditEvent(a.closeCtx, &events.RoleDelete{
		Metadata: events.Metadata{
			Type: events.RoleDeletedEvent,
			Code: events.RoleDeletedCode,
		},
		UserMetadata: events.UserMetadata{
			User: clientUsername(ctx),
		},
		ResourceMetadata: events.ResourceMetadata{
			Name: name,
		},
	})
	if err != nil {
		log.WithError(err).Warnf("Failed to emit role deleted event.")
	}
	return nil
}
// upsertRole creates or updates a role and emits a RoleCreate audit event
// (best-effort: an emit failure is logged but does not fail the upsert).
func (a *Server) upsertRole(ctx context.Context, role services.Role) error {
	if err := a.UpsertRole(ctx, role); err != nil {
		return trace.Wrap(err)
	}
	err := a.emitter.EmitAuditEvent(a.closeCtx, &events.RoleCreate{
		Metadata: events.Metadata{
			Type: events.RoleCreatedEvent,
			Code: events.RoleCreatedCode,
		},
		UserMetadata: events.UserMetadata{
			User: clientUsername(ctx),
		},
		ResourceMetadata: events.ResourceMetadata{
			Name: role.GetName(),
		},
	})
	if err != nil {
		log.WithError(err).Warnf("Failed to emit role create event.")
	}
	return nil
}
// CreateAccessRequest validates, time-bounds, and stores a new access request,
// then emits an AccessRequestCreate audit event; the emit error is returned.
func (a *Server) CreateAccessRequest(ctx context.Context, req services.AccessRequest) error {
	err := services.ValidateAccessRequest(a, req,
		// if request is in state pending, role expansion must be applied
		services.ExpandRoles(req.GetState().IsPending()),
		// always apply system annotations before storing new requests
		services.ApplySystemAnnotations(true),
	)
	if err != nil {
		return trace.Wrap(err)
	}
	ttl, err := a.calculateMaxAccessTTL(req)
	if err != nil {
		return trace.Wrap(err)
	}
	now := a.clock.Now().UTC()
	req.SetCreationTime(now)
	maxExpiry := now.Add(ttl)
	// Set access expiry if an allowable default was not provided.
	if req.GetAccessExpiry().Before(now) || req.GetAccessExpiry().After(maxExpiry) {
		req.SetAccessExpiry(maxExpiry)
	}
	// By default, resource expiry should match access expiry.
	req.SetExpiry(req.GetAccessExpiry())
	// If the access request is in a pending state, the expiry of the
	// underlying resource is capped to PendingAccessDuration in order to
	// limit orphaned access requests.
	if req.GetState().IsPending() {
		pendingExpiry := now.Add(defaults.PendingAccessDuration)
		if pendingExpiry.Before(req.Expiry()) {
			req.SetExpiry(pendingExpiry)
		}
	}
	if err := a.DynamicAccess.CreateAccessRequest(ctx, req); err != nil {
		return trace.Wrap(err)
	}
	return trace.Wrap(a.emitter.EmitAuditEvent(a.closeCtx, &events.AccessRequestCreate{
		Metadata: events.Metadata{
			Type: events.AccessRequestCreateEvent,
			Code: events.AccessRequestCreateCode,
		},
		UserMetadata: events.UserMetadata{
			User: req.GetUser(),
		},
		Roles:        req.GetRoles(),
		RequestID:    req.GetName(),
		RequestState: req.GetState().String(),
		Reason:       req.GetRequestReason(),
	}))
}
// SetAccessRequestState updates the state of an existing access request and
// emits a corresponding AccessRequestUpdate audit event (errors from the emit
// are both logged and returned).
func (a *Server) SetAccessRequestState(ctx context.Context, params services.AccessRequestUpdate) error {
	if err := a.DynamicAccess.SetAccessRequestState(ctx, params); err != nil {
		return trace.Wrap(err)
	}
	updateEvent := &events.AccessRequestCreate{
		Metadata: events.Metadata{
			Type: events.AccessRequestUpdateEvent,
			Code: events.AccessRequestUpdateCode,
		},
		ResourceMetadata: events.ResourceMetadata{
			UpdatedBy: clientUsername(ctx),
		},
		RequestID:    params.RequestID,
		RequestState: params.State.String(),
		Reason:       params.Reason,
		Roles:        params.Roles,
	}
	if delegator := getDelegator(ctx); delegator != "" {
		updateEvent.Delegator = delegator
	}
	if len(params.Annotations) > 0 {
		// Annotations are best-effort: an encoding failure only drops them
		// from the event.
		if annotations, err := events.EncodeMapStrings(params.Annotations); err != nil {
			log.WithError(err).Debugf("Failed to encode access request annotations.")
		} else {
			updateEvent.Annotations = annotations
		}
	}
	err := a.emitter.EmitAuditEvent(a.closeCtx, updateEvent)
	if err != nil {
		log.WithError(err).Warn("Failed to emit access request update event.")
	}
	return trace.Wrap(err)
}
// calculateMaxAccessTTL determines the maximum allowable TTL for a given
// access request based on the MaxSessionTTLs of the roles being requested
// (an access request's life cannot exceed the smallest allowable
// MaxSessionTTL value of the roles that it requests).
func (a *Server) calculateMaxAccessTTL(req services.AccessRequest) (time.Duration, error) {
	ttl := defaults.MaxAccessDuration
	for _, name := range req.GetRoles() {
		role, err := a.GetRole(name)
		if err != nil {
			return 0, trace.Wrap(err)
		}
		// A zero/negative role TTL is treated as "no opinion".
		if roleTTL := time.Duration(role.GetOptions().MaxSessionTTL); roleTTL > 0 && roleTTL < ttl {
			ttl = roleTTL
		}
	}
	return ttl, nil
}
// NewKeepAliver returns a new instance of keep aliver whose lifetime is
// bound to ctx; closing it cancels the forwarding goroutine it starts.
func (a *Server) NewKeepAliver(ctx context.Context) (services.KeepAliver, error) {
	kctx, cancel := context.WithCancel(ctx)
	keepAliver := &authKeepAliver{
		a:           a,
		ctx:         kctx,
		cancel:      cancel,
		keepAlivesC: make(chan services.KeepAlive),
	}
	go keepAliver.forwardKeepAlives()
	return keepAliver, nil
}
// GetCertAuthority returns certificate authority by given id. Parameter
// loadSigningKeys controls if signing keys are loaded. The read is served
// through the auth server's cache (a.GetCache()).
func (a *Server) GetCertAuthority(id services.CertAuthID, loadSigningKeys bool, opts ...services.MarshalOption) (services.CertAuthority, error) {
	return a.GetCache().GetCertAuthority(id, loadSigningKeys, opts...)
}
// GetCertAuthorities returns a list of authorities of a given type;
// loadSigningKeys controls whether signing keys should be loaded or not.
// The read is served through the auth server's cache.
func (a *Server) GetCertAuthorities(caType services.CertAuthType, loadSigningKeys bool, opts ...services.MarshalOption) ([]services.CertAuthority, error) {
	return a.GetCache().GetCertAuthorities(caType, loadSigningKeys, opts...)
}
// GetStaticTokens gets the list of static tokens used to provision nodes,
// served through the auth server's cache.
func (a *Server) GetStaticTokens() (services.StaticTokens, error) {
	return a.GetCache().GetStaticTokens()
}
// GetToken finds and returns a provisioning token by ID, served through the
// auth server's cache.
func (a *Server) GetToken(token string) (services.ProvisionToken, error) {
	return a.GetCache().GetToken(token)
}
// GetRoles is a part of auth.AccessPoint implementation; it returns all
// roles from the auth server's cache.
func (a *Server) GetRoles() ([]services.Role, error) {
	return a.GetCache().GetRoles()
}
// GetRole is a part of auth.AccessPoint implementation; it returns a single
// role by name from the auth server's cache.
func (a *Server) GetRole(name string) (services.Role, error) {
	return a.GetCache().GetRole(name)
}
// GetNamespace returns a namespace by name from the auth server's cache.
func (a *Server) GetNamespace(name string) (*services.Namespace, error) {
	return a.GetCache().GetNamespace(name)
}
// GetNamespaces is a part of auth.AccessPoint implementation; it returns all
// namespaces from the auth server's cache.
func (a *Server) GetNamespaces() ([]services.Namespace, error) {
	return a.GetCache().GetNamespaces()
}
// GetNodes is a part of auth.AccessPoint implementation; it returns the
// nodes registered in the given namespace from the auth server's cache.
func (a *Server) GetNodes(namespace string, opts ...services.MarshalOption) ([]services.Server, error) {
	return a.GetCache().GetNodes(namespace, opts...)
}
// GetReverseTunnels is a part of auth.AccessPoint implementation; it returns
// the registered reverse tunnels from the auth server's cache.
func (a *Server) GetReverseTunnels(opts ...services.MarshalOption) ([]services.ReverseTunnel, error) {
	return a.GetCache().GetReverseTunnels(opts...)
}
// GetProxies is a part of auth.AccessPoint implementation; it returns the
// registered proxy servers from the auth server's cache.
func (a *Server) GetProxies() ([]services.Server, error) {
	return a.GetCache().GetProxies()
}
// GetUser is a part of auth.AccessPoint implementation. withSecrets controls
// whether secret material is included; the read is served through the cache.
func (a *Server) GetUser(name string, withSecrets bool) (user services.User, err error) {
	return a.GetCache().GetUser(name, withSecrets)
}
// GetUsers is a part of auth.AccessPoint implementation. withSecrets controls
// whether secret material is included; the read is served through the cache.
func (a *Server) GetUsers(withSecrets bool) (users []services.User, err error) {
	return a.GetCache().GetUsers(withSecrets)
}
// GetTunnelConnections is a part of auth.AccessPoint implementation.
// GetTunnelConnections are not using recent cache as they are designed
// to be called periodically and always return fresh data.
func (a *Server) GetTunnelConnections(clusterName string, opts ...services.MarshalOption) ([]services.TunnelConnection, error) {
	return a.GetCache().GetTunnelConnections(clusterName, opts...)
}
// GetAllTunnelConnections is a part of auth.AccessPoint implementation.
// GetAllTunnelConnections are not using recent cache, as they are designed
// to be called periodically and always return fresh data.
func (a *Server) GetAllTunnelConnections(opts ...services.MarshalOption) (conns []services.TunnelConnection, err error) {
	return a.GetCache().GetAllTunnelConnections(opts...)
}
// CreateAuditStream creates an audit event stream for the given session,
// selecting the underlying streamer based on the session recording mode.
func (a *Server) CreateAuditStream(ctx context.Context, sid session.ID) (events.Stream, error) {
	s, err := a.modeStreamer()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return s.CreateAuditStream(ctx, sid)
}
// ResumeAuditStream resumes a previously created stream identified by its
// upload ID, selecting the underlying streamer based on the recording mode.
func (a *Server) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (events.Stream, error) {
	s, err := a.modeStreamer()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return s.ResumeAuditStream(ctx, sid, uploadID)
}
// modeStreamer creates a streamer based on the cluster's session recording
// mode.
func (a *Server) modeStreamer() (events.Streamer, error) {
	clusterConfig, err := a.GetClusterConfig()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// In async mode, clients submit session control events during the
	// session in addition to writing a local session recording to be
	// uploaded at the end of the session, so forwarding events here would
	// result in duplicate events.
	if !services.IsRecordSync(clusterConfig.GetSessionRecording()) {
		return a.streamer, nil
	}
	// In sync mode, the auth server forwards session control to the event
	// log in addition to sending them and data events to record storage.
	return events.NewTeeStreamer(a.streamer, a.emitter), nil
}
// GetAppServers is a part of the auth.AccessPoint implementation; it returns
// the application servers in the namespace from the auth server's cache.
func (a *Server) GetAppServers(ctx context.Context, namespace string, opts ...services.MarshalOption) ([]services.Server, error) {
	return a.GetCache().GetAppServers(ctx, namespace, opts...)
}
// GetAppSession is a part of the auth.AccessPoint implementation; it returns
// an application web session from the auth server's cache.
func (a *Server) GetAppSession(ctx context.Context, req services.GetAppSessionRequest) (services.WebSession, error) {
	return a.GetCache().GetAppSession(ctx, req)
}
// authKeepAliver is a keep aliver using the auth server directly. The
// embedded RWMutex guards err, which records why the keep aliver stopped.
type authKeepAliver struct {
	sync.RWMutex
	a           *Server            // auth server keep alives are forwarded to
	ctx         context.Context    // cancelled when the keep aliver is closed
	cancel      context.CancelFunc // cancels ctx
	keepAlivesC chan services.KeepAlive
	err         error // last forwarding error, exposed via Error()
}
// KeepAlives returns a channel accepting keep alive requests; requests sent
// to it are forwarded to the auth server by forwardKeepAlives.
func (k *authKeepAliver) KeepAlives() chan<- services.KeepAlive {
	return k.keepAlivesC
}
// forwardKeepAlives pumps keep alive requests from keepAlivesC to the auth
// server until either side shuts down or a forwarding attempt fails.
func (k *authKeepAliver) forwardKeepAlives() {
	for {
		select {
		case <-k.a.closeCtx.Done():
			// The auth server is shutting down; close the keep aliver too.
			k.Close()
			return
		case <-k.ctx.Done():
			// The keep aliver itself was closed.
			return
		case keepAlive := <-k.keepAlivesC:
			err := k.a.KeepAliveServer(k.ctx, keepAlive)
			if err != nil {
				// Record the failure (exposed via Error()) and stop.
				k.closeWithError(err)
				return
			}
		}
	}
}
// closeWithError closes the keep aliver and records the error that caused
// the shutdown so Error() can report it.
func (k *authKeepAliver) closeWithError(err error) {
	k.Close()
	k.Lock()
	k.err = err
	k.Unlock()
}
// Error returns the error, if any, that caused the keep aliver to be
// closed. Safe for concurrent use.
func (k *authKeepAliver) Error() error {
	k.RLock()
	defer k.RUnlock()
	return k.err
}
// Done returns a channel that is closed whenever the keep aliver is closed.
func (k *authKeepAliver) Done() <-chan struct{} {
	return k.ctx.Done()
}
// Close closes the keep aliver and cancels its goroutines. It always
// returns nil and is safe to call multiple times.
func (k *authKeepAliver) Close() error {
	k.cancel()
	return nil
}
const (
	// BearerTokenTTL specifies how long a standard bearer token exists
	// before it has to be renewed by the client.
	BearerTokenTTL = 10 * time.Minute
	// TokenLenBytes is the length in bytes of the invite token.
	TokenLenBytes = 16
	// SessionTokenBytes is the number of bytes of a web or application session.
	SessionTokenBytes = 32
)
// oidcClient is an internal structure that pairs an OIDC client with the
// config it was created from (so config changes can be detected).
type oidcClient struct {
	client *oidc.Client
	config oidc.ClientConfig
}
// samlProvider is an internal structure that pairs a SAML service provider
// with the connector it was created from.
type samlProvider struct {
	provider  *saml2.SAMLServiceProvider
	connector services.SAMLConnector
}
// githubClient is an internal structure that pairs a Github OAuth2 client
// with the config it was created from.
type githubClient struct {
	client *oauth2.Client
	config oauth2.Config
}
// oidcConfigsEqual returns true if the provided OIDC configs are equal,
// comparing redirect URL, credentials, and the full scope list.
func oidcConfigsEqual(a, b oidc.ClientConfig) bool {
	switch {
	case a.RedirectURL != b.RedirectURL,
		a.Credentials.ID != b.Credentials.ID,
		a.Credentials.Secret != b.Credentials.Secret,
		len(a.Scope) != len(b.Scope):
		return false
	}
	// Scopes are order-sensitive.
	for i, scope := range a.Scope {
		if scope != b.Scope[i] {
			return false
		}
	}
	return true
}
// oauth2ConfigsEqual returns true if the provided OAuth2 configs are equal,
// comparing credentials, URLs, auth method, and the full scope list.
func oauth2ConfigsEqual(a, b oauth2.Config) bool {
	switch {
	case a.Credentials.ID != b.Credentials.ID,
		a.Credentials.Secret != b.Credentials.Secret,
		a.RedirectURL != b.RedirectURL,
		len(a.Scope) != len(b.Scope):
		return false
	case a.AuthURL != b.AuthURL,
		a.TokenURL != b.TokenURL,
		a.AuthMethod != b.AuthMethod:
		return false
	}
	// Scopes are order-sensitive.
	for i, scope := range a.Scope {
		if scope != b.Scope[i] {
			return false
		}
	}
	return true
}
// isHTTPS checks that u parses as a URL and that its scheme is https;
// any other scheme yields a BadParameter error.
func isHTTPS(u string) error {
	parsed, err := url.Parse(u)
	if err != nil {
		return trace.Wrap(err)
	}
	if scheme := parsed.Scheme; scheme != "https" {
		return trace.BadParameter("expected scheme https, got %q", scheme)
	}
	return nil
}
// init registers the package's Prometheus metrics; registration is required
// for them to be exposed on the metrics endpoint.
func init() {
	// Metrics have to be registered to be exposed:
	prometheus.MustRegister(generateRequestsCount)
	prometheus.MustRegister(generateThrottledRequestsCount)
	prometheus.MustRegister(generateRequestsCurrent)
	prometheus.MustRegister(generateRequestsLatencies)
}
|
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package config
import (
"fmt"
"runtime"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
)
// FolderConfiguration describes the configuration of a synced folder,
// serialized to both XML (config file) and JSON (REST API).
type FolderConfiguration struct {
	ID                    string                      `xml:"id,attr" json:"id"`
	Label                 string                      `xml:"label,attr" json:"label"`
	FilesystemType        fs.FilesystemType           `xml:"filesystemType" json:"filesystemType"`
	Path                  string                      `xml:"path,attr" json:"path"`
	Type                  FolderType                  `xml:"type,attr" json:"type"`
	Devices               []FolderDeviceConfiguration `xml:"device" json:"devices"`
	RescanIntervalS       int                         `xml:"rescanIntervalS,attr" json:"rescanIntervalS"`
	FSWatcherEnabled      bool                        `xml:"fsWatcherEnabled,attr" json:"fsWatcherEnabled"`
	FSWatcherDelayS       int                         `xml:"fsWatcherDelayS,attr" json:"fsWatcherDelayS"`
	IgnorePerms           bool                        `xml:"ignorePerms,attr" json:"ignorePerms"`
	AutoNormalize         bool                        `xml:"autoNormalize,attr" json:"autoNormalize"`
	MinDiskFree           Size                        `xml:"minDiskFree" json:"minDiskFree"`
	Versioning            VersioningConfiguration     `xml:"versioning" json:"versioning"`
	Copiers               int                         `xml:"copiers" json:"copiers"` // This defines how many files are handled concurrently.
	Pullers               int                         `xml:"pullers" json:"pullers"` // Defines how many blocks are fetched at the same time, possibly between separate copier routines.
	Hashers               int                         `xml:"hashers" json:"hashers"` // Less than one sets the value to the number of cores. These are CPU bound due to hashing.
	Order                 PullOrder                   `xml:"order" json:"order"`
	IgnoreDelete          bool                        `xml:"ignoreDelete" json:"ignoreDelete"`
	ScanProgressIntervalS int                         `xml:"scanProgressIntervalS" json:"scanProgressIntervalS"` // Set to a negative value to disable. Value of 0 will get replaced with value of 2 (default value)
	PullerSleepS          int                         `xml:"pullerSleepS" json:"pullerSleepS"`
	PullerPauseS          int                         `xml:"pullerPauseS" json:"pullerPauseS"`
	MaxConflicts          int                         `xml:"maxConflicts" json:"maxConflicts"`
	DisableSparseFiles    bool                        `xml:"disableSparseFiles" json:"disableSparseFiles"`
	DisableTempIndexes    bool                        `xml:"disableTempIndexes" json:"disableTempIndexes"`
	Paused                bool                        `xml:"paused" json:"paused"`
	WeakHashThresholdPct  int                         `xml:"weakHashThresholdPct" json:"weakHashThresholdPct"` // Use weak hash if more than X percent of the file has changed. Set to -1 to always use weak hash.

	// cachedFilesystem is populated by prepare() from FilesystemType/Path
	// and returned by Filesystem().
	cachedFilesystem fs.Filesystem

	// Deprecated fields are parsed for backward compatibility but omitted
	// from the JSON API.
	DeprecatedReadOnly       bool    `xml:"ro,attr,omitempty" json:"-"`
	DeprecatedMinDiskFreePct float64 `xml:"minDiskFreePct,omitempty" json:"-"`
}
// FolderDeviceConfiguration names a device a folder is shared with and,
// if applicable, the device that introduced the share.
type FolderDeviceConfiguration struct {
	DeviceID     protocol.DeviceID `xml:"id,attr" json:"deviceID"`
	IntroducedBy protocol.DeviceID `xml:"introducedBy,attr" json:"introducedBy"`
}
// NewFolderConfiguration returns a folder configuration for the given ID,
// filesystem type and path, normalized via prepare().
func NewFolderConfiguration(id string, fsType fs.FilesystemType, path string) FolderConfiguration {
	cfg := FolderConfiguration{
		ID:             id,
		FilesystemType: fsType,
		Path:           path,
	}
	cfg.prepare()
	return cfg
}
// Copy returns a deep-enough copy of the configuration: the Devices slice
// and Versioning are duplicated so the copy can be mutated independently.
func (f FolderConfiguration) Copy() FolderConfiguration {
	clone := f
	clone.Devices = make([]FolderDeviceConfiguration, len(f.Devices))
	copy(clone.Devices, f.Devices)
	clone.Versioning = f.Versioning.Copy()
	return clone
}
// Filesystem returns the filesystem for the folder root, normally the one
// cached by prepare(). If no cache is present (and a path is set) a fresh
// filesystem is constructed, which is logged as a bug.
func (f FolderConfiguration) Filesystem() fs.Filesystem {
	// This is intentionally not a pointer method, because things like
	// cfg.Folders["default"].Filesystem() should be valid.
	if f.cachedFilesystem == nil && f.Path != "" {
		l.Infoln("bug: uncached filesystem call (should only happen in tests)")
		return fs.NewFilesystem(f.FilesystemType, f.Path)
	}
	return f.cachedFilesystem
}
// CreateMarker creates the ".stfolder" marker directory in the folder root
// if it does not exist yet, fsyncs the root so the marker reaches disk, and
// hides the marker where supported. The fsync is best effort: failures are
// only logged.
//
// Fixes: the sync result was previously tested against the (necessarily
// nil) Open error, so fsync failures were silently ignored; Debugln was
// called with printf-style verbs; and the directory handle was never closed.
func (f *FolderConfiguration) CreateMarker() error {
	if f.HasMarker() {
		return nil
	}
	// Directory permission bits; filtered down by umask on Unixes.
	permBits := fs.FileMode(0777)
	if runtime.GOOS == "windows" {
		// Windows has no umask so we must choose a safer set of bits to
		// begin with.
		permBits = 0700
	}
	fs := f.Filesystem()
	if err := fs.Mkdir(".stfolder", permBits); err != nil {
		return err
	}
	if dir, err := fs.Open("."); err != nil {
		l.Debugln("folder marker: open . failed:", err)
	} else {
		if serr := dir.Sync(); serr != nil {
			l.Debugln("folder marker: fsync . failed:", serr)
		}
		dir.Close()
	}
	fs.Hide(".stfolder")
	return nil
}
// HasMarker reports whether the ".stfolder" marker exists in the folder root.
func (f *FolderConfiguration) HasMarker() bool {
	if _, err := f.Filesystem().Stat(".stfolder"); err != nil {
		return false
	}
	return true
}
// CreateRoot creates the folder root directory if it does not exist.
// A failure to create it is logged and returned.
func (f *FolderConfiguration) CreateRoot() (err error) {
	// Directory permission bits. Will be filtered down to something
	// sane by umask on Unixes.
	permBits := fs.FileMode(0777)
	if runtime.GOOS == "windows" {
		// Windows has no umask so we must choose a safer set of bits to
		// begin with.
		permBits = 0700
	}
	filesystem := f.Filesystem()
	_, err = filesystem.Stat(".")
	if !fs.IsNotExist(err) {
		// Root exists (err == nil) or failed with some other error.
		return err
	}
	if err = filesystem.MkdirAll(".", permBits); err != nil {
		l.Warnf("Creating directory for %v: %v", f.Description(), err)
	}
	return err
}
// Description returns a human readable name for the folder: the quoted
// label with the ID in parentheses, or just the ID when no label is set.
func (f FolderConfiguration) Description() string {
	if f.Label != "" {
		return fmt.Sprintf("%q (%s)", f.Label, f.ID)
	}
	return f.ID
}
// DeviceIDs returns the IDs of all devices this folder is shared with,
// in configuration order.
func (f *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
	ids := make([]protocol.DeviceID, 0, len(f.Devices))
	for _, dev := range f.Devices {
		ids = append(ids, dev.DeviceID)
	}
	return ids
}
// prepare normalizes the configuration after load/construction: it caches
// the filesystem, clamps the rescan interval, disables the FS watcher when
// its delay is unset, and fills in defaults for versioning parameters and
// the weak hash threshold.
func (f *FolderConfiguration) prepare() {
	if f.Path != "" {
		f.cachedFilesystem = fs.NewFilesystem(f.FilesystemType, f.Path)
	}

	switch {
	case f.RescanIntervalS > MaxRescanIntervalS:
		f.RescanIntervalS = MaxRescanIntervalS
	case f.RescanIntervalS < 0:
		f.RescanIntervalS = 0
	}

	if f.FSWatcherDelayS <= 0 {
		f.FSWatcherEnabled = false
		f.FSWatcherDelayS = 10
	}

	if f.Versioning.Params == nil {
		f.Versioning.Params = make(map[string]string)
	}

	if f.WeakHashThresholdPct == 0 {
		f.WeakHashThresholdPct = 25
	}
}
// FolderDeviceConfigurationList implements sort.Interface, ordering folder
// devices by device ID.
type FolderDeviceConfigurationList []FolderDeviceConfiguration

func (l FolderDeviceConfigurationList) Less(a, b int) bool {
	return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l FolderDeviceConfigurationList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}
func (l FolderDeviceConfigurationList) Len() int {
	return len(l)
}
lib/config: Improve debug logging around folder marker
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package config
import (
"fmt"
"runtime"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
)
// FolderConfiguration describes the configuration of a synced folder,
// serialized to both XML (config file) and JSON (REST API).
type FolderConfiguration struct {
	ID                    string                      `xml:"id,attr" json:"id"`
	Label                 string                      `xml:"label,attr" json:"label"`
	FilesystemType        fs.FilesystemType           `xml:"filesystemType" json:"filesystemType"`
	Path                  string                      `xml:"path,attr" json:"path"`
	Type                  FolderType                  `xml:"type,attr" json:"type"`
	Devices               []FolderDeviceConfiguration `xml:"device" json:"devices"`
	RescanIntervalS       int                         `xml:"rescanIntervalS,attr" json:"rescanIntervalS"`
	FSWatcherEnabled      bool                        `xml:"fsWatcherEnabled,attr" json:"fsWatcherEnabled"`
	FSWatcherDelayS       int                         `xml:"fsWatcherDelayS,attr" json:"fsWatcherDelayS"`
	IgnorePerms           bool                        `xml:"ignorePerms,attr" json:"ignorePerms"`
	AutoNormalize         bool                        `xml:"autoNormalize,attr" json:"autoNormalize"`
	MinDiskFree           Size                        `xml:"minDiskFree" json:"minDiskFree"`
	Versioning            VersioningConfiguration     `xml:"versioning" json:"versioning"`
	Copiers               int                         `xml:"copiers" json:"copiers"` // This defines how many files are handled concurrently.
	Pullers               int                         `xml:"pullers" json:"pullers"` // Defines how many blocks are fetched at the same time, possibly between separate copier routines.
	Hashers               int                         `xml:"hashers" json:"hashers"` // Less than one sets the value to the number of cores. These are CPU bound due to hashing.
	Order                 PullOrder                   `xml:"order" json:"order"`
	IgnoreDelete          bool                        `xml:"ignoreDelete" json:"ignoreDelete"`
	ScanProgressIntervalS int                         `xml:"scanProgressIntervalS" json:"scanProgressIntervalS"` // Set to a negative value to disable. Value of 0 will get replaced with value of 2 (default value)
	PullerSleepS          int                         `xml:"pullerSleepS" json:"pullerSleepS"`
	PullerPauseS          int                         `xml:"pullerPauseS" json:"pullerPauseS"`
	MaxConflicts          int                         `xml:"maxConflicts" json:"maxConflicts"`
	DisableSparseFiles    bool                        `xml:"disableSparseFiles" json:"disableSparseFiles"`
	DisableTempIndexes    bool                        `xml:"disableTempIndexes" json:"disableTempIndexes"`
	Paused                bool                        `xml:"paused" json:"paused"`
	WeakHashThresholdPct  int                         `xml:"weakHashThresholdPct" json:"weakHashThresholdPct"` // Use weak hash if more than X percent of the file has changed. Set to -1 to always use weak hash.

	// cachedFilesystem is populated by prepare() from FilesystemType/Path
	// and returned by Filesystem().
	cachedFilesystem fs.Filesystem

	// Deprecated fields are parsed for backward compatibility but omitted
	// from the JSON API.
	DeprecatedReadOnly       bool    `xml:"ro,attr,omitempty" json:"-"`
	DeprecatedMinDiskFreePct float64 `xml:"minDiskFreePct,omitempty" json:"-"`
}
// FolderDeviceConfiguration names a device a folder is shared with and,
// if applicable, the device that introduced the share.
type FolderDeviceConfiguration struct {
	DeviceID     protocol.DeviceID `xml:"id,attr" json:"deviceID"`
	IntroducedBy protocol.DeviceID `xml:"introducedBy,attr" json:"introducedBy"`
}
// NewFolderConfiguration returns a folder configuration for the given ID,
// filesystem type and path, normalized via prepare().
func NewFolderConfiguration(id string, fsType fs.FilesystemType, path string) FolderConfiguration {
	cfg := FolderConfiguration{
		ID:             id,
		FilesystemType: fsType,
		Path:           path,
	}
	cfg.prepare()
	return cfg
}
// Copy returns a deep-enough copy of the configuration: the Devices slice
// and Versioning are duplicated so the copy can be mutated independently.
func (f FolderConfiguration) Copy() FolderConfiguration {
	clone := f
	clone.Devices = make([]FolderDeviceConfiguration, len(f.Devices))
	copy(clone.Devices, f.Devices)
	clone.Versioning = f.Versioning.Copy()
	return clone
}
// Filesystem returns the filesystem for the folder root, normally the one
// cached by prepare(). If no cache is present (and a path is set) a fresh
// filesystem is constructed, which is logged as a bug.
func (f FolderConfiguration) Filesystem() fs.Filesystem {
	// This is intentionally not a pointer method, because things like
	// cfg.Folders["default"].Filesystem() should be valid.
	if f.cachedFilesystem == nil && f.Path != "" {
		l.Infoln("bug: uncached filesystem call (should only happen in tests)")
		return fs.NewFilesystem(f.FilesystemType, f.Path)
	}
	return f.cachedFilesystem
}
// CreateMarker creates the ".stfolder" marker directory in the folder root
// if it does not exist yet, fsyncs the root so the marker reaches disk
// (best effort, failures are only logged), and hides the marker where
// supported.
//
// Fix: the directory handle opened for the fsync was never closed,
// leaking a file descriptor on every marker creation.
func (f *FolderConfiguration) CreateMarker() error {
	if !f.HasMarker() {
		permBits := fs.FileMode(0777)
		if runtime.GOOS == "windows" {
			// Windows has no umask so we must choose a safer set of bits to
			// begin with.
			permBits = 0700
		}
		fs := f.Filesystem()
		err := fs.Mkdir(".stfolder", permBits)
		if err != nil {
			return err
		}
		if dir, err := fs.Open("."); err != nil {
			l.Debugln("folder marker: open . failed:", err)
		} else {
			if err := dir.Sync(); err != nil {
				l.Debugln("folder marker: fsync . failed:", err)
			}
			dir.Close()
		}
		fs.Hide(".stfolder")
	}
	return nil
}
// HasMarker reports whether the ".stfolder" marker exists in the folder root.
func (f *FolderConfiguration) HasMarker() bool {
	if _, err := f.Filesystem().Stat(".stfolder"); err != nil {
		return false
	}
	return true
}
// CreateRoot creates the folder root directory if it does not exist.
// A failure to create it is logged and returned.
func (f *FolderConfiguration) CreateRoot() (err error) {
	// Directory permission bits. Will be filtered down to something
	// sane by umask on Unixes.
	permBits := fs.FileMode(0777)
	if runtime.GOOS == "windows" {
		// Windows has no umask so we must choose a safer set of bits to
		// begin with.
		permBits = 0700
	}
	filesystem := f.Filesystem()
	_, err = filesystem.Stat(".")
	if !fs.IsNotExist(err) {
		// Root exists (err == nil) or failed with some other error.
		return err
	}
	if err = filesystem.MkdirAll(".", permBits); err != nil {
		l.Warnf("Creating directory for %v: %v", f.Description(), err)
	}
	return err
}
// Description returns a human readable name for the folder: the quoted
// label with the ID in parentheses, or just the ID when no label is set.
func (f FolderConfiguration) Description() string {
	if f.Label != "" {
		return fmt.Sprintf("%q (%s)", f.Label, f.ID)
	}
	return f.ID
}
// DeviceIDs returns the IDs of all devices this folder is shared with,
// in configuration order.
func (f *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
	ids := make([]protocol.DeviceID, 0, len(f.Devices))
	for _, dev := range f.Devices {
		ids = append(ids, dev.DeviceID)
	}
	return ids
}
// prepare normalizes the configuration after load/construction: it caches
// the filesystem, clamps the rescan interval, disables the FS watcher when
// its delay is unset, and fills in defaults for versioning parameters and
// the weak hash threshold.
func (f *FolderConfiguration) prepare() {
	if f.Path != "" {
		f.cachedFilesystem = fs.NewFilesystem(f.FilesystemType, f.Path)
	}

	switch {
	case f.RescanIntervalS > MaxRescanIntervalS:
		f.RescanIntervalS = MaxRescanIntervalS
	case f.RescanIntervalS < 0:
		f.RescanIntervalS = 0
	}

	if f.FSWatcherDelayS <= 0 {
		f.FSWatcherEnabled = false
		f.FSWatcherDelayS = 10
	}

	if f.Versioning.Params == nil {
		f.Versioning.Params = make(map[string]string)
	}

	if f.WeakHashThresholdPct == 0 {
		f.WeakHashThresholdPct = 25
	}
}
// FolderDeviceConfigurationList implements sort.Interface, ordering folder
// devices by device ID.
type FolderDeviceConfigurationList []FolderDeviceConfiguration

func (l FolderDeviceConfigurationList) Less(a, b int) bool {
	return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l FolderDeviceConfigurationList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}
func (l FolderDeviceConfigurationList) Len() int {
	return len(l)
}
|
package sous
import "time"
type (
	// A Selector selects the buildpack for a given build context.
	Selector interface {
		SelectBuildpack(*BuildContext) (Buildpack, error)
	}

	// Labeller defines a container-based build system.
	Labeller interface {
		ApplyMetadata(*BuildResult, *BuildContext) error
	}

	// Registrar defines the interface to register build results to be
	// deployed later.
	Registrar interface {
		// Register takes a BuildResult and makes it available for the deployment
		// target system to find during deployment
		Register(*BuildResult, *BuildContext) error
	}

	// BuildArtifact describes the actual built binary Sous will deploy.
	BuildArtifact struct {
		Name, Type string
		Qualities  []Quality
	}

	// A Quality represents a characteristic of a BuildArtifact that needs to be recorded.
	Quality struct {
		Name string
		// Kind is the kind of this quality.
		// Known kinds include: advisory
		Kind string
	}

	// Buildpack is a set of instructions used to build a particular
	// kind of project.
	Buildpack interface {
		Detect(*BuildContext) (*DetectResult, error)
		Build(*BuildContext) (*BuildResult, error)
	}

	// DetectResult represents the result of a detection.
	DetectResult struct {
		Compatible  bool
		Description string
		Data        interface{}
	}

	// BuildResult represents the result of a build made with a Buildpack.
	BuildResult struct {
		ImageID                   string
		VersionName, RevisionName string
		Advisories                []string
		Elapsed                   time.Duration
	}

	// EchoSelector wraps a factory function as a Selector.
	EchoSelector struct {
		Factory func(*BuildContext) (Buildpack, error)
	}
)
// SelectBuildpack implements Selector by delegating to the wrapped Factory.
func (s *EchoSelector) SelectBuildpack(c *BuildContext) (Buildpack, error) {
	return s.Factory(c)
}
Add a String() method for BuildResult
package sous
import (
"fmt"
"strings"
"time"
)
type (
	// A Selector selects the buildpack for a given build context.
	Selector interface {
		SelectBuildpack(*BuildContext) (Buildpack, error)
	}

	// Labeller defines a container-based build system.
	Labeller interface {
		ApplyMetadata(*BuildResult, *BuildContext) error
	}

	// Registrar defines the interface to register build results to be
	// deployed later.
	Registrar interface {
		// Register takes a BuildResult and makes it available for the deployment
		// target system to find during deployment
		Register(*BuildResult, *BuildContext) error
	}

	// BuildArtifact describes the actual built binary Sous will deploy.
	BuildArtifact struct {
		Name, Type string
		Qualities  []Quality
	}

	// A Quality represents a characteristic of a BuildArtifact that needs to be recorded.
	Quality struct {
		Name string
		// Kind is the kind of this quality.
		// Known kinds include: advisory
		Kind string
	}

	// Buildpack is a set of instructions used to build a particular
	// kind of project.
	Buildpack interface {
		Detect(*BuildContext) (*DetectResult, error)
		Build(*BuildContext) (*BuildResult, error)
	}

	// DetectResult represents the result of a detection.
	DetectResult struct {
		Compatible  bool
		Description string
		Data        interface{}
	}

	// BuildResult represents the result of a build made with a Buildpack.
	BuildResult struct {
		ImageID                   string
		VersionName, RevisionName string
		Advisories                []string
		Elapsed                   time.Duration
	}

	// EchoSelector wraps a factory function as a Selector.
	EchoSelector struct {
		Factory func(*BuildContext) (Buildpack, error)
	}
)
// SelectBuildpack implements Selector by delegating to the wrapped Factory.
func (s *EchoSelector) SelectBuildpack(c *BuildContext) (Buildpack, error) {
	return s.Factory(c)
}
// String renders the build result for human consumption: the image ID,
// the advisory list (when non-empty), and the elapsed build time.
func (br *BuildResult) String() string {
	str := fmt.Sprintf("Built: %q", br.ImageID)
	if len(br.Advisories) > 0 {
		str = str + "\nAdvisories:\n  " + strings.Join(br.Advisories, " \n")
	}
	return fmt.Sprintf("%s\nElapsed: %v", str, br.Elapsed)
}
|
package vxlan
import (
gonet "net"
"strconv"
"errors"
log "github.com/Sirupsen/logrus"
"github.com/docker/go-plugins-helpers/network"
"github.com/samalba/dockerclient"
"github.com/vishvananda/netlink"
)
// Driver implements the docker network plugin interface for vxlan-backed
// networks. It embeds network.Driver for the endpoints it does not override.
type Driver struct {
	network.Driver
	scope    string                   // network scope reported in capabilities ("local"/"global")
	vtepdev  string                   // optional VTEP device name applied to every vxlan link
	networks map[string]*NetworkState // per-network state, keyed by network ID
	docker   *dockerclient.DockerClient
}
// NetworkState is filled in at network creation time;
// it contains state that we wish to keep for each network.
type NetworkState struct {
	Bridge   *netlink.Bridge
	VXLan    *netlink.Vxlan
	Gateway  string
	IPv4Data []*network.IPAMData
	IPv6Data []*network.IPAMData
}
// NewDriver returns a vxlan driver with the given scope and optional VTEP
// device, connected to the local docker daemon socket.
func NewDriver(scope string, vtepdev string) (*Driver, error) {
	client, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		return nil, err
	}
	return &Driver{
		scope:    scope,
		vtepdev:  vtepdev,
		networks: map[string]*NetworkState{},
		docker:   client,
	}, nil
}
// GetCapabilities reports the driver's configured scope to docker.
func (d *Driver) GetCapabilities() (*network.CapabilitiesResponse, error) {
	log.Debugf("Get Capabilities request")
	response := &network.CapabilitiesResponse{Scope: d.scope}
	log.Debugf("Responding with %+v", response)
	return response, nil
}
// intNames holds the interface names (vxlan link and bridge) used for one
// docker network.
type intNames struct {
	VxlanName  string
	BridgeName string
}
// getIntNames derives the vxlan and bridge interface names for a network:
// defaults are built from the first 12 characters of the network ID, and
// may be overridden via the "vxlanName"/"bridgeName" network options.
// Returns an error for networks not using the vxlan driver.
func getIntNames(netID string, docker *dockerclient.DockerClient) (*intNames, error) {
	netInfo, err := docker.InspectNetwork(netID)
	if err != nil {
		return nil, err
	}
	if netInfo.Driver != "vxlan" {
		log.Errorf("Network %v is not a vxlan network", netID)
		return nil, errors.New("Not a vxlan network")
	}
	names := &intNames{
		BridgeName: "br_" + netID[:12],
		VxlanName:  "vx_" + netID[:12],
	}
	// Explicit names from network options override the defaults.
	for k, v := range netInfo.Options {
		switch k {
		case "vxlanName":
			names.VxlanName = v
		case "bridgeName":
			names.BridgeName = v
		}
	}
	return names, nil
}
// getGateway returns the first non-empty gateway address found in the
// network's IPAM configuration, or "" when none is configured.
func getGateway(netID string, docker dockerclient.DockerClient) (string, error) {
	netInfo, err := docker.InspectNetwork(netID)
	if err != nil {
		return "", err
	}
	for i := range netInfo.IPAM.Config {
		if gw := netInfo.IPAM.Config[i].Gateway; gw != "" {
			return gw, nil
		}
	}
	return "", nil
}
// intLinks holds the netlink devices (vxlan link and bridge) used for one
// docker network.
type intLinks struct {
	Vxlan  *netlink.Vxlan
	Bridge *netlink.Bridge
}
// getLinks gets the netlink devices for a network, or creates them if they
// don't exist, and ensures the vxlan link is enslaved to the bridge.
// Returns an error for networks not using the vxlan driver.
func (d *Driver) getLinks(netID string) (*intLinks, error) {
	docker := d.docker
	net, err := docker.InspectNetwork(netID)
	if err != nil {
		return nil, err
	}
	if net.Driver != "vxlan" {
		log.Errorf("Network %v is not a vxlan network", netID)
		return nil, errors.New("Not a vxlan network")
	}
	names, err := getIntNames(netID, docker)
	if err != nil {
		return nil, err
	}
	// Get the bridge by name, creating it on first use.
	var bridge *netlink.Bridge
	bridgelink, err := netlink.LinkByName(names.BridgeName)
	if err == nil {
		bridge = &netlink.Bridge{
			LinkAttrs: *bridgelink.Attrs(),
		}
	} else {
		bridge, err = d.createBridge(names.BridgeName, net)
		if err != nil {
			return nil, err
		}
	}
	// Get the vxlan link by name, creating it on first use.
	var vxlan *netlink.Vxlan
	vxlanlink, err := netlink.LinkByName(names.VxlanName)
	if err == nil {
		vxlan = &netlink.Vxlan{
			LinkAttrs: *vxlanlink.Attrs(),
		}
	} else {
		vxlan, err = d.createVxLan(names.VxlanName, net)
		if err != nil {
			return nil, err
		}
	}
	// Enslave the vxlan link to the bridge if not already done
	// (MasterIndex == 0 means it has no master device yet).
	if vxlan.LinkAttrs.MasterIndex == 0 {
		err = netlink.LinkSetMaster(vxlan, bridge)
		if err != nil {
			return nil, err
		}
	}
	links := &intLinks{
		Vxlan:  vxlan,
		Bridge: bridge,
	}
	return links, nil
}
// createBridge creates and brings up a linux bridge named bridgeName,
// applying MTU/MAC/txqueuelen overrides from the network options. For
// local-scope networks the configured gateway addresses are added to the
// bridge.
//
// Fix: the netlink.AddrAdd error was silently discarded; it is now logged
// so a missing gateway address is diagnosable (it may legitimately fail
// with EEXIST when the address is already configured).
func (d *Driver) createBridge(bridgeName string, net *dockerclient.NetworkResource) (*netlink.Bridge, error) {
	bridge := &netlink.Bridge{
		LinkAttrs: netlink.LinkAttrs{
			Name: bridgeName,
		},
	}
	// Parse interface options
	for k, v := range net.Options {
		switch k {
		case "bridgeMTU":
			mtu, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			bridge.LinkAttrs.MTU = mtu
		case "bridgeHardwareAddr":
			hardwareAddr, err := gonet.ParseMAC(v)
			if err != nil {
				return nil, err
			}
			bridge.LinkAttrs.HardwareAddr = hardwareAddr
		case "bridgeTxQLen":
			txQLen, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			bridge.LinkAttrs.TxQLen = txQLen
		}
	}
	if err := netlink.LinkAdd(bridge); err != nil {
		return nil, err
	}
	if err := netlink.LinkSetUp(bridge); err != nil {
		return nil, err
	}
	if d.scope == "local" {
		for i := range net.IPAM.Config {
			gatewayIP, err := netlink.ParseAddr(net.IPAM.Config[i].Gateway)
			if err != nil {
				return nil, err
			}
			if err := netlink.AddrAdd(bridge, gatewayIP); err != nil {
				log.Warnf("Adding address %v to bridge %v: %v", gatewayIP, bridgeName, err)
			}
		}
	}
	return bridge, nil
}
// createVxLan creates and brings up a vxlan link named vxlanName,
// applying every recognized key in the network options (link attributes
// such as MTU/MAC/txqueuelen plus the vxlan-specific settings). A driver
// level vtepdev, when set, overrides any per-network VtepDev option.
func (d *Driver) createVxLan(vxlanName string, net *dockerclient.NetworkResource) (*netlink.Vxlan, error) {
	vxlan := &netlink.Vxlan{
		LinkAttrs: netlink.LinkAttrs{
			Name: vxlanName,
		},
	}

	// Parse interface options
	for k, v := range net.Options {
		switch k {
		case "vxlanMTU":
			MTU, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.LinkAttrs.MTU = MTU
		case "vxlanHardwareAddr":
			HardwareAddr, err := gonet.ParseMAC(v)
			if err != nil {
				return nil, err
			}
			vxlan.LinkAttrs.HardwareAddr = HardwareAddr
		case "vxlanTxQLen":
			TxQLen, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.LinkAttrs.TxQLen = TxQLen
		case "VxlanId":
			log.Debugf("VxlanID: %+v", v)
			VxlanId, err := strconv.ParseInt(v, 0, 32)
			if err != nil {
				return nil, err
			}
			log.Debugf("VxlanID: %+v", VxlanId)
			log.Debugf("int(VxlanID): %+v", int(VxlanId))
			vxlan.VxlanId = int(VxlanId)
		case "VtepDev":
			vtepDev, err := netlink.LinkByName(v)
			if err != nil {
				return nil, err
			}
			vxlan.VtepDevIndex = vtepDev.Attrs().Index
		case "SrcAddr":
			vxlan.SrcAddr = gonet.ParseIP(v)
		case "Group":
			vxlan.Group = gonet.ParseIP(v)
		case "TTL":
			TTL, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.TTL = TTL
		case "TOS":
			TOS, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.TOS = TOS
		case "Learning":
			Learning, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.Learning = Learning
		case "Proxy":
			Proxy, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.Proxy = Proxy
		case "RSC":
			RSC, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.RSC = RSC
		case "L2miss":
			L2miss, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.L2miss = L2miss
		case "L3miss":
			L3miss, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.L3miss = L3miss
		case "NoAge":
			NoAge, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.NoAge = NoAge
		case "GBP":
			GBP, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.GBP = GBP
		case "Age":
			Age, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.Age = Age
		case "Limit":
			Limit, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.Limit = Limit
		case "Port":
			Port, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.Port = Port
		case "PortLow":
			PortLow, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.PortLow = PortLow
		case "PortHigh":
			PortHigh, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.PortHigh = PortHigh
		}
	}

	// A driver-wide VTEP device wins over the per-network option.
	if d.vtepdev != "" {
		vtepDev, err := netlink.LinkByName(d.vtepdev)
		if err != nil {
			return nil, err
		}
		vxlan.VtepDevIndex = vtepDev.Attrs().Index
	}

	if err := netlink.LinkAdd(vxlan); err != nil {
		return nil, err
	}
	// bring interfaces up
	if err := netlink.LinkSetUp(vxlan); err != nil {
		return nil, err
	}
	return vxlan, nil
}
// CreateNetwork is intentionally a no-op: the bridge and vxlan interfaces
// are created lazily (via getLinks) when the first container joins.
func (d *Driver) CreateNetwork(r *network.CreateNetworkRequest) error {
	log.Debugf("Create network request: %+v", r)
	// return nil and lazy create the network when a container joins it
	return nil
}
// deleteNics removes the vxlan and bridge interfaces backing the given
// network, if they exist; interfaces that cannot be looked up are skipped.
func (d *Driver) deleteNics(netID string) error {
	names, err := getIntNames(netID, d.docker)
	if err != nil {
		return err
	}
	// LinkByName failing is treated as "interface absent" — nothing to delete
	vxlan, err := netlink.LinkByName(names.VxlanName)
	if err == nil {
		err := netlink.LinkDel(vxlan)
		if err != nil {
			return err
		}
		log.Debugf("Deleting interface %+v", names.VxlanName)
	}
	bridge, err := netlink.LinkByName(names.BridgeName)
	if err == nil {
		err := netlink.LinkDel(bridge)
		if err != nil {
			return err
		}
		log.Debugf("Deleting interface %+v", names.BridgeName)
	}
	return nil
}
// DeleteNetwork tears down the vxlan and bridge interfaces backing the
// network identified by the request.
func (d *Driver) DeleteNetwork(r *network.DeleteNetworkRequest) error {
	return d.deleteNics(r.NetworkID)
}
// CreateEndpoint makes sure the network's bridge and vxlan devices exist
// (creating them lazily) before the endpoint is joined.
func (d *Driver) CreateEndpoint(r *network.CreateEndpointRequest) error {
	log.Debugf("Create endpoint request: %+v", r)
	// get the links
	if _, err := d.getLinks(r.NetworkID); err != nil {
		return err
	}
	return nil
}
// DeleteEndpoint removes the network's interfaces once the last container
// has left; while any container remains on the network it is a no-op.
func (d *Driver) DeleteEndpoint(r *network.DeleteEndpointRequest) error {
	log.Debugf("Delete endpoint request: %+v", r)
	netID := r.NetworkID
	docker := d.docker
	net, err := docker.InspectNetwork(netID)
	if err != nil {
		return err
	}
	if len(net.Containers) == 0 {
		log.Debugf("No remaining containers, deleting vxlan and bridge interfaces.")
		return d.deleteNics(netID)
	}
	return nil
}
// EndpointInfo reports per-endpoint metadata; this driver exposes none,
// so an empty value map is returned.
func (d *Driver) EndpointInfo(r *network.InfoRequest) (*network.InfoResponse, error) {
	values := make(map[string]string)
	return &network.InfoResponse{Value: values}, nil
}
// Join connects a container endpoint to the network: it creates a veth
// pair, attaches the host end to the bridge, and returns the peer end for
// libnetwork to move into the container's sandbox.
func (d *Driver) Join(r *network.JoinRequest) (*network.JoinResponse, error) {
	netID := r.NetworkID
	// get the links (bridge + vxlan), creating them lazily if needed
	links, err := d.getLinks(netID)
	if err != nil {
		return nil, err
	}
	// create and attach local name to the bridge
	// NOTE(review): names use only the first 5 chars of the endpoint ID —
	// presumably unique enough in practice; confirm collision risk.
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth_" + r.EndpointID[:5],
			MTU: links.Bridge.LinkAttrs.MTU},
		PeerName: "ethc" + r.EndpointID[:5],
	}
	if err := netlink.LinkAdd(veth); err != nil {
		log.Errorf("failed to create the veth pair named: [ %v ] error: [ %s ] ", veth, err)
		return nil, err
	}
	// bring up the veth pair
	err = netlink.LinkSetUp(veth)
	if err != nil {
		log.Warnf("Error enabling Veth local iface: [ %v ]", veth)
		return nil, err
	}
	// add veth to bridge
	err = netlink.LinkSetMaster(veth, links.Bridge)
	if err != nil {
		return nil, err
	}
	// SrcName gets renamed to DstPrefix + ID on the container iface
	gateway, err := getGateway(netID, *d.docker)
	if err != nil {
		return nil, err
	}
	res := &network.JoinResponse{
		InterfaceName: network.InterfaceName{
			SrcName:   veth.PeerName,
			DstPrefix: "eth",
		},
		Gateway: gateway,
	}
	log.Debugf("Join endpoint %s:%s to %s", r.NetworkID, r.EndpointID, r.SandboxKey)
	return res, nil
}
// Leave detaches a container endpoint from the network: its veth pair is
// brought down, detached from the bridge, and deleted, in that order.
func (d *Driver) Leave(r *network.LeaveRequest) error {
	log.Debugf("Leave request: %+v", r)
	// rebuild the veth handle from the endpoint ID (same naming as Join)
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth_" + r.EndpointID[:5]},
		PeerName:  "ethc" + r.EndpointID[:5],
	}
	// bring down the veth pair
	err := netlink.LinkSetDown(veth)
	if err != nil {
		log.Warnf("Error bring down Veth local iface: [ %v ]", veth)
		return err
	}
	// remove veth from bridge
	err = netlink.LinkSetNoMaster(veth)
	if err != nil {
		log.Warnf("Error removing veth from bridge")
		return err
	}
	// delete the veth interface
	err = netlink.LinkDel(veth)
	if err != nil {
		log.Warnf("Error removing veth interface")
		return err
	}
	return nil
}
Delete NICs only if the bridge has no devices attached other than the vxlan
package vxlan
import (
gonet "net"
"strconv"
"errors"
log "github.com/Sirupsen/logrus"
"github.com/docker/go-plugins-helpers/network"
"github.com/samalba/dockerclient"
"github.com/vishvananda/netlink"
)
// Driver implements the libnetwork remote network driver API for vxlan
// overlay networks backed by a linux bridge.
type Driver struct {
	network.Driver
	scope   string // capability scope reported via GetCapabilities ("local" checked in createBridge)
	vtepdev string // optional VTEP device name; when set it overrides the per-network VtepDev option
	// networks is per-network state keyed by network ID.
	// NOTE(review): not populated anywhere in the code visible here — confirm use.
	networks map[string]*NetworkState
	docker   *dockerclient.DockerClient // client for the local docker daemon
}
// NetworkState is filled in at network creation time.
// It contains state that we wish to keep for each network.
type NetworkState struct {
	Bridge   *netlink.Bridge     // linux bridge container veths attach to
	VXLan    *netlink.Vxlan      // vxlan device enslaved to the bridge
	Gateway  string              // gateway address reported to joining containers
	IPv4Data []*network.IPAMData // IPAM data supplied by libnetwork
	IPv6Data []*network.IPAMData
}
// NewDriver creates a vxlan Driver with the given capability scope and
// optional VTEP device, connected to the local docker daemon over its
// default unix socket.
func NewDriver(scope string, vtepdev string) (*Driver, error) {
	docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		return nil, err
	}
	d := &Driver{
		scope:    scope,
		vtepdev:  vtepdev,
		networks: make(map[string]*NetworkState),
		docker:   docker,
	}
	return d, nil
}
// GetCapabilities reports the driver's configured scope to libnetwork.
func (d *Driver) GetCapabilities() (*network.CapabilitiesResponse, error) {
	log.Debugf("Get Capabilities request")
	resp := &network.CapabilitiesResponse{Scope: d.scope}
	log.Debugf("Responding with %+v", resp)
	return resp, nil
}
// intNames holds the interface names used for a network's vxlan and
// bridge devices.
type intNames struct {
	VxlanName  string
	BridgeName string
}
// getIntNames resolves the vxlan and bridge interface names for a docker
// network: defaults are derived from the (truncated) network ID and may be
// overridden via the "vxlanName" / "bridgeName" network options.
func getIntNames(netID string, docker *dockerclient.DockerClient) (*intNames, error) {
	netInfo, err := docker.InspectNetwork(netID)
	if err != nil {
		return nil, err
	}
	if netInfo.Driver != "vxlan" {
		log.Errorf("Network %v is not a vxlan network", netID)
		return nil, errors.New("Not a vxlan network")
	}
	names := &intNames{
		BridgeName: "br_" + netID[:12],
		VxlanName:  "vx_" + netID[:12],
	}
	// get interface names from options first
	if v, ok := netInfo.Options["vxlanName"]; ok {
		names.VxlanName = v
	}
	if v, ok := netInfo.Options["bridgeName"]; ok {
		names.BridgeName = v
	}
	return names, nil
}
// getGateway returns the first non-empty gateway address from the
// network's IPAM configuration, or "" when none is configured.
func getGateway(netID string, docker dockerclient.DockerClient) (string, error) {
	netInfo, err := docker.InspectNetwork(netID)
	if err != nil {
		return "", err
	}
	for _, cfg := range netInfo.IPAM.Config {
		if gw := cfg.Gateway; gw != "" {
			return gw, nil
		}
	}
	return "", nil
}
// intLinks pairs the netlink handles for a network's vxlan and bridge
// devices.
type intLinks struct {
	Vxlan  *netlink.Vxlan
	Bridge *netlink.Bridge
}
// getLinks gets the netlink devices (bridge + vxlan) for a network,
// creating them if they don't exist, and ensures the vxlan device is
// enslaved to the bridge.
func (d *Driver) getLinks(netID string) (*intLinks, error) {
	docker := d.docker
	net, err := docker.InspectNetwork(netID)
	if err != nil {
		return nil, err
	}
	if net.Driver != "vxlan" {
		log.Errorf("Network %v is not a vxlan network", netID)
		return nil, errors.New("Not a vxlan network")
	}
	names, err := getIntNames(netID, docker)
	if err != nil {
		return nil, err
	}
	// get or create links
	var bridge *netlink.Bridge
	bridgelink, err := netlink.LinkByName(names.BridgeName)
	if err == nil {
		// reuse the existing device, wrapping its attrs in a Bridge handle
		bridge = &netlink.Bridge{
			LinkAttrs: *bridgelink.Attrs(),
		}
	} else {
		bridge, err = d.createBridge(names.BridgeName, net)
		if err != nil {
			return nil, err
		}
	}
	var vxlan *netlink.Vxlan
	vxlanlink, err := netlink.LinkByName(names.VxlanName)
	if err == nil {
		vxlan = &netlink.Vxlan{
			LinkAttrs: *vxlanlink.Attrs(),
		}
	} else {
		vxlan, err = d.createVxLan(names.VxlanName, net)
		if err != nil {
			return nil, err
		}
	}
	// add vxlan to bridge (MasterIndex == 0 means it has no master yet)
	if vxlan.LinkAttrs.MasterIndex == 0 {
		err = netlink.LinkSetMaster(vxlan, bridge)
		if err != nil {
			return nil, err
		}
	}
	links := &intLinks{
		Vxlan:  vxlan,
		Bridge: bridge,
	}
	return links, nil
}
// createBridge creates and brings up the bridge interface for a network,
// applying any bridge* options set on the docker network. For local-scope
// drivers the IPAM gateway addresses are assigned to the bridge.
func (d *Driver) createBridge(bridgeName string, net *dockerclient.NetworkResource) (*netlink.Bridge, error) {
	bridge := &netlink.Bridge{
		LinkAttrs: netlink.LinkAttrs{
			Name: bridgeName,
		},
	}
	// Parse interface options
	for k, v := range net.Options {
		switch k {
		case "bridgeMTU":
			mtu, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			bridge.LinkAttrs.MTU = mtu
		case "bridgeHardwareAddr":
			hardwareAddr, err := gonet.ParseMAC(v)
			if err != nil {
				return nil, err
			}
			bridge.LinkAttrs.HardwareAddr = hardwareAddr
		case "bridgeTxQLen":
			txQLen, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			bridge.LinkAttrs.TxQLen = txQLen
		}
	}
	err := netlink.LinkAdd(bridge)
	if err != nil {
		return nil, err
	}
	err = netlink.LinkSetUp(bridge)
	if err != nil {
		return nil, err
	}
	if d.scope == "local" {
		for i := range net.IPAM.Config {
			gatewayIP, err := netlink.ParseAddr(net.IPAM.Config[i].Gateway)
			if err != nil {
				return nil, err
			}
			// BUG FIX: the AddrAdd error was previously silently dropped,
			// leaving the bridge without its gateway address on failure.
			if err := netlink.AddrAdd(bridge, gatewayIP); err != nil {
				return nil, err
			}
		}
	}
	return bridge, nil
}
// createVxLan creates the vxlan netlink interface for a network, applying
// any driver options set on the docker network, then brings it up.
// The driver-level vtepdev (if configured) overrides a per-network VtepDev.
func (d *Driver) createVxLan(vxlanName string, net *dockerclient.NetworkResource) (*netlink.Vxlan, error) {
	vxlan := &netlink.Vxlan{
		LinkAttrs: netlink.LinkAttrs{
			Name: vxlanName,
		},
	}
	// Parse interface options; a switch replaces the original chain of
	// independent if-statements (keys are mutually exclusive).
	for k, v := range net.Options {
		switch k {
		case "vxlanMTU":
			mtu, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.LinkAttrs.MTU = mtu
		case "vxlanHardwareAddr":
			hardwareAddr, err := gonet.ParseMAC(v)
			if err != nil {
				return nil, err
			}
			vxlan.LinkAttrs.HardwareAddr = hardwareAddr
		case "vxlanTxQLen":
			txQLen, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.LinkAttrs.TxQLen = txQLen
		case "VxlanId":
			log.Debugf("VxlanID: %+v", v)
			// base-0 parse accepts decimal, hex (0x...) and octal VNIs
			id, err := strconv.ParseInt(v, 0, 32)
			if err != nil {
				return nil, err
			}
			log.Debugf("VxlanID: %+v", id)
			log.Debugf("int(VxlanID): %+v", int(id))
			vxlan.VxlanId = int(id)
		case "VtepDev":
			vtepDev, err := netlink.LinkByName(v)
			if err != nil {
				return nil, err
			}
			vxlan.VtepDevIndex = vtepDev.Attrs().Index
		case "SrcAddr":
			vxlan.SrcAddr = gonet.ParseIP(v)
		case "Group":
			vxlan.Group = gonet.ParseIP(v)
		case "TTL":
			ttl, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.TTL = ttl
		case "TOS":
			tos, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.TOS = tos
		case "Learning":
			learning, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.Learning = learning
		case "Proxy":
			proxy, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.Proxy = proxy
		case "RSC":
			rsc, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.RSC = rsc
		case "L2miss":
			l2miss, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.L2miss = l2miss
		case "L3miss":
			l3miss, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.L3miss = l3miss
		case "NoAge":
			noAge, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.NoAge = noAge
		case "GBP":
			gbp, err := strconv.ParseBool(v)
			if err != nil {
				return nil, err
			}
			vxlan.GBP = gbp
		case "Age":
			age, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.Age = age
		case "Limit":
			limit, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.Limit = limit
		case "Port":
			port, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.Port = port
		case "PortLow":
			portLow, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.PortLow = portLow
		case "PortHigh":
			portHigh, err := strconv.Atoi(v)
			if err != nil {
				return nil, err
			}
			vxlan.PortHigh = portHigh
		}
	}
	// driver-level VTEP device takes precedence over network options
	if d.vtepdev != "" {
		vtepDev, err := netlink.LinkByName(d.vtepdev)
		if err != nil {
			return nil, err
		}
		vxlan.VtepDevIndex = vtepDev.Attrs().Index
	}
	err := netlink.LinkAdd(vxlan)
	if err != nil {
		return nil, err
	}
	// bring interfaces up
	err = netlink.LinkSetUp(vxlan)
	if err != nil {
		return nil, err
	}
	return vxlan, nil
}
// CreateNetwork is intentionally a no-op: the bridge and vxlan interfaces
// are created lazily (via getLinks) when the first container joins.
func (d *Driver) CreateNetwork(r *network.CreateNetworkRequest) error {
	log.Debugf("Create network request: %+v", r)
	// return nil and lazy create the network when a container joins it
	return nil
}
// deleteLink deletes the named netlink interface if it exists; an
// interface that cannot be looked up is treated as already gone.
func deleteLink(name string) error {
	link, err := netlink.LinkByName(name)
	if err != nil {
		// interface absent — nothing to delete
		return nil
	}
	if err := netlink.LinkDel(link); err != nil {
		return err
	}
	log.Debugf("Deleting interface %+v", name)
	return nil
}

// deleteNics removes the vxlan and bridge interfaces backing the given
// network. The duplicated lookup/delete/log sequence of the original is
// factored into deleteLink.
func (d *Driver) deleteNics(netID string) error {
	names, err := getIntNames(netID, d.docker)
	if err != nil {
		return err
	}
	if err := deleteLink(names.VxlanName); err != nil {
		return err
	}
	return deleteLink(names.BridgeName)
}
// DeleteNetwork tears down the vxlan and bridge interfaces backing the
// network identified by the request.
func (d *Driver) DeleteNetwork(r *network.DeleteNetworkRequest) error {
	netID := r.NetworkID
	return d.deleteNics(netID)
}
// CreateEndpoint makes sure the network's bridge and vxlan devices exist
// (creating them lazily) before the endpoint is joined.
func (d *Driver) CreateEndpoint(r *network.CreateEndpointRequest) error {
	log.Debugf("Create endpoint request: %+v", r)
	netID := r.NetworkID
	// get the links
	_, err := d.getLinks(netID)
	if err != nil {
		return err
	}
	return nil
}
// DeleteEndpoint removes the network's vxlan and bridge interfaces when no
// device other than the vxlan itself remains attached to the bridge.
func (d *Driver) DeleteEndpoint(r *network.DeleteEndpointRequest) error {
	log.Debugf("Delete endpoint request: %+v", r)
	netID := r.NetworkID
	links, err := d.getLinks(netID)
	if err != nil {
		return err
	}
	// BUG FIX: the original assigned to undeclared VxlanIndex/BridgeIndex
	// with "=" (compile error) — declare them locally instead.
	vxlanIndex := links.Vxlan.LinkAttrs.Index
	bridgeIndex := links.Bridge.LinkAttrs.Index
	allLinks, err := netlink.LinkList()
	if err != nil {
		return err
	}
	// BUG FIX: "for link := range allLinks" iterated slice indices (ints);
	// iterate the links themselves and use the Link interface's Attrs().
	for _, link := range allLinks {
		// keep the interfaces while anything besides the vxlan is enslaved
		// to the bridge
		if link.Attrs().Index != vxlanIndex && link.Attrs().MasterIndex == bridgeIndex {
			return nil
		}
	}
	log.Debugf("No interfaces attached to bridge: deleting vxlan and bridge interfaces.")
	return d.deleteNics(netID)
}
// EndpointInfo reports per-endpoint metadata; this driver exposes none,
// so an empty value map is returned.
func (d *Driver) EndpointInfo(r *network.InfoRequest) (*network.InfoResponse, error) {
	res := &network.InfoResponse{
		Value: make(map[string]string),
	}
	return res, nil
}
// Join connects a container endpoint to the network: it creates a veth
// pair, attaches the host end to the bridge, and returns the peer end for
// libnetwork to move into the container's sandbox.
func (d *Driver) Join(r *network.JoinRequest) (*network.JoinResponse, error) {
	netID := r.NetworkID
	// get the links (bridge + vxlan), creating them lazily if needed
	links, err := d.getLinks(netID)
	if err != nil {
		return nil, err
	}
	// create and attach local name to the bridge
	// NOTE(review): names use only the first 5 chars of the endpoint ID —
	// presumably unique enough in practice; confirm collision risk.
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth_" + r.EndpointID[:5],
			MTU: links.Bridge.LinkAttrs.MTU},
		PeerName: "ethc" + r.EndpointID[:5],
	}
	if err := netlink.LinkAdd(veth); err != nil {
		log.Errorf("failed to create the veth pair named: [ %v ] error: [ %s ] ", veth, err)
		return nil, err
	}
	// bring up the veth pair
	err = netlink.LinkSetUp(veth)
	if err != nil {
		log.Warnf("Error enabling Veth local iface: [ %v ]", veth)
		return nil, err
	}
	// add veth to bridge
	err = netlink.LinkSetMaster(veth, links.Bridge)
	if err != nil {
		return nil, err
	}
	// SrcName gets renamed to DstPrefix + ID on the container iface
	gateway, err := getGateway(netID, *d.docker)
	if err != nil {
		return nil, err
	}
	res := &network.JoinResponse{
		InterfaceName: network.InterfaceName{
			SrcName:   veth.PeerName,
			DstPrefix: "eth",
		},
		Gateway: gateway,
	}
	log.Debugf("Join endpoint %s:%s to %s", r.NetworkID, r.EndpointID, r.SandboxKey)
	return res, nil
}
// Leave detaches a container endpoint from the network: its veth pair is
// brought down, detached from the bridge, and deleted, in that order.
func (d *Driver) Leave(r *network.LeaveRequest) error {
	log.Debugf("Leave request: %+v", r)
	// rebuild the veth handle from the endpoint ID (same naming as Join)
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth_" + r.EndpointID[:5]},
		PeerName:  "ethc" + r.EndpointID[:5],
	}
	// bring down the veth pair
	err := netlink.LinkSetDown(veth)
	if err != nil {
		log.Warnf("Error bring down Veth local iface: [ %v ]", veth)
		return err
	}
	// remove veth from bridge
	err = netlink.LinkSetNoMaster(veth)
	if err != nil {
		log.Warnf("Error removing veth from bridge")
		return err
	}
	// delete the veth interface
	err = netlink.LinkDel(veth)
	if err != nil {
		log.Warnf("Error removing veth interface")
		return err
	}
	return nil
}
|
package binary
import (
"bytes"
"errors"
"reflect"
"strings"
"unsafe"
)
// Marshaler is an interface implemented by types that can marshal
// themselves into binary stream data.
type Marshaler interface {
	MarshalStream(*bytes.Buffer) error
}
// Unmarshaler is an interface implemented by types that can unmarshal
// binary stream data into themselves.
// If a type is a struct, UnmarshalStream should be aware that its fields
// may not be initialized.
type Unmarshaler interface {
	UnmarshalStream(*bytes.Buffer) error
}
// Marshal encodes struct v into buf.
// If v is not a struct, Marshal returns an InvalidMarshalTypeError.
// Fields tagged `stream:"pass"` and unexported fields are skipped; the tag
// option "little" selects little-endian encoding for that field.
func Marshal(v interface{}, buf *bytes.Buffer) error {
	vv := reflect.ValueOf(v)
	t := vv.Type()
	if t.Kind() != reflect.Struct {
		return InvalidMarshalTypeError{t}
	}
	for i := 0; i < vv.NumField(); i++ {
		field := vv.Field(i)
		tag := strings.ToLower(t.Field(i).Tag.Get("stream"))
		if tag == "pass" || !field.CanInterface() {
			continue
		}
		// collect comma-separated tag options into a set
		sp := strings.Split(tag, ",")
		option := make(map[string]struct{})
		for _, v := range sp {
			option[v] = struct{}{}
		}
		fv := field.Interface()
		// types implementing Marshaler encode themselves
		if marshaler, ok := fv.(Marshaler); ok {
			if err := marshaler.MarshalStream(buf); err != nil {
				return err
			}
			continue
		}
		// big-endian unless the "little" option is present
		var endian ByteOrder = BigEndian
		if _, ok := option["little"]; ok {
			endian = LittleEndian
		}
		// a slice carries no encoded length, so it must be the final field
		if field.Kind() == reflect.Slice && i+1 != vv.NumField() {
			return MarshalError{
				errors.New("Exported slice element should be last while marshaling"),
				t.Field(i),
			}
		}
		if err := pack(field, buf, endian); err != nil {
			return MarshalError{err, t.Field(i)}
		}
	}
	return nil
}
// pack writes the binary encoding of a single value v to buf using the
// given byte order. Fixed-width numeric kinds go through the ByteOrder
// helpers, structs recurse via Marshal, and arrays/slices are encoded
// element-wise ([]byte is written verbatim).
// NOTE(review): reflect.Int and reflect.Uint (platform-sized) deliberately
// encode nothing — confirm callers never rely on these kinds. Any other
// unhandled kind (string, map, ...) also silently encodes nothing.
func pack(v reflect.Value, buf *bytes.Buffer, endian ByteOrder) error {
	kind := v.Kind()
	fv := v.Interface()
	switch kind {
	case reflect.Int, reflect.Uint:
		break // explicit break expression to clarify separated case with "bool"
	case reflect.Bool:
		fv, ok := fv.(bool)
		if !ok {
			return TypeAssertionError{"bool"}
		}
		// bools encode as a single byte: 1 for true, 0 for false
		var err error
		if fv {
			err = buf.WriteByte(1)
		} else {
			err = buf.WriteByte(0)
		}
		if err != nil {
			return err
		}
	case reflect.Int8:
		fv, ok := fv.(int8)
		if !ok {
			return TypeAssertionError{"int8"}
		}
		// reinterpret the int8 bit pattern as a byte
		if err := buf.WriteByte(*(*byte)(unsafe.Pointer(&fv))); err != nil {
			return err
		}
	case reflect.Uint8:
		fv, ok := fv.(byte)
		if !ok {
			return TypeAssertionError{"byte"}
		}
		if err := buf.WriteByte(fv); err != nil {
			return err
		}
	case reflect.Int16:
		fv, ok := fv.(int16)
		if !ok {
			return TypeAssertionError{"int16"}
		}
		b := make([]byte, 2)
		endian.PutInt16(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Uint16:
		fv, ok := fv.(uint16)
		if !ok {
			return TypeAssertionError{"uint16"}
		}
		b := make([]byte, 2)
		endian.PutUint16(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Int32:
		fv, ok := fv.(int32)
		if !ok {
			return TypeAssertionError{"int32"}
		}
		b := make([]byte, 4)
		endian.PutInt32(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Uint32:
		fv, ok := fv.(uint32)
		if !ok {
			return TypeAssertionError{"uint32"}
		}
		b := make([]byte, 4)
		endian.PutUint32(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Int64:
		fv, ok := fv.(int64)
		if !ok {
			return TypeAssertionError{"int64"}
		}
		b := make([]byte, 8)
		endian.PutInt64(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Uint64:
		fv, ok := fv.(uint64)
		if !ok {
			return TypeAssertionError{"uint64"}
		}
		b := make([]byte, 8)
		endian.PutUint64(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Float32:
		fv, ok := fv.(float32)
		if !ok {
			return TypeAssertionError{"float32"}
		}
		b := make([]byte, 4)
		endian.PutFloat32(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Float64:
		fv, ok := fv.(float64)
		if !ok {
			return TypeAssertionError{"float64"}
		}
		b := make([]byte, 8)
		endian.PutFloat64(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Struct:
		// nested structs follow the same per-field rules as the top level
		if err := Marshal(fv, buf); err != nil {
			return err
		}
	case reflect.Array, reflect.Slice:
		length := v.Len()
		if v.Type().Elem().Kind() == reflect.Uint8 {
			// byte sequences are written out directly
			if _, err := buf.Write(v.Slice(0, length).Interface().([]byte)); err != nil {
				return err
			}
			break
		}
		for j := 0; j < length; j++ {
			if err := pack(v.Index(j), buf, endian); err != nil {
				return err
			}
		}
	}
	return nil
}
// Unmarshal parses binary stream data from buf and stores the result in the struct pointed by v.
func Unmarshal(v interface{}, buf *bytes.Buffer) error {
vv := reflect.ValueOf(v).Elem()
t := vv.Type()
for i := 0; i < t.NumField(); i++ {
field := vv.Field(i)
tag := strings.ToLower(t.Field(i).Tag.Get("stream"))
if tag == "pass" || !field.CanSet() {
continue
}
sp := strings.Split(tag, ",")
option := make(map[string]struct{})
for _, v := range sp {
option[v] = struct{}{}
}
fv := field.Addr().Interface()
if unmarshaler, ok := fv.(Unmarshaler); ok {
if err := unmarshaler.UnmarshalStream(buf); err != nil {
return err
}
continue
}
var endian ByteOrder = BigEndian
if _, ok := option["little"]; ok {
endian = LittleEndian
}
if field.Kind() == reflect.Slice && i+1 != vv.NumField() {
return UnmarshalError{
errors.New("Exported slice element should be last while unmarshaling"),
t.Field(i),
}
}
if err := unpack(field, buf, endian); err != nil {
return UnmarshalError{err, t.Field(i)}
}
}
return nil
}
// unpack reads the binary encoding of a single value from buf into v using
// the given byte order. Structs recurse via Unmarshal; arrays/slices are
// decoded element-wise ([]byte is filled verbatim).
// NOTE(review): buf.Read may return fewer than len(b) bytes with a nil
// error, so a truncated stream can silently decode garbage — consider
// checking the returned byte count.
// NOTE(review): the bool case only ever sets true; a 0 byte leaves any
// pre-existing value untouched.
func unpack(v reflect.Value, buf *bytes.Buffer, endian ByteOrder) error {
	kind := v.Kind()
	switch kind {
	case reflect.Int, reflect.Uint:
		break // explicit break expression to clarify separated case with "bool"
	case reflect.Bool:
		x, err := buf.ReadByte()
		if err != nil {
			return err
		}
		if x != 0 {
			v.SetBool(true)
		}
	case reflect.Int8:
		x, err := buf.ReadByte()
		if err != nil {
			return err
		}
		// reinterpret the byte's bit pattern as an int8
		v.SetInt(int64(*(*int8)(unsafe.Pointer(&x))))
	case reflect.Uint8:
		x, err := buf.ReadByte()
		if err != nil {
			return err
		}
		v.SetUint(uint64(x))
	case reflect.Int16:
		b := make([]byte, 2)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetInt(int64(endian.Int16(b)))
	case reflect.Uint16:
		b := make([]byte, 2)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetUint(uint64(endian.Uint16(b)))
	case reflect.Int32:
		b := make([]byte, 4)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetInt(int64(endian.Int32(b)))
	case reflect.Uint32:
		b := make([]byte, 4)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetUint(uint64(endian.Uint32(b)))
	case reflect.Int64:
		b := make([]byte, 8)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetInt(endian.Int64(b))
	case reflect.Uint64:
		b := make([]byte, 8)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetUint(endian.Uint64(b))
	case reflect.Float32:
		b := make([]byte, 4)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetFloat(float64(endian.Float32(b)))
	case reflect.Float64:
		b := make([]byte, 8)
		if _, err := buf.Read(b); err != nil {
			return err
		}
		v.SetFloat(endian.Float64(b))
	case reflect.Struct:
		if err := Unmarshal(v.Addr().Interface(), buf); err != nil {
			return err
		}
	case reflect.Array, reflect.Slice:
		length := v.Len()
		if v.Type().Elem().Kind() == reflect.Uint8 {
			if _, err := buf.Read(v.Slice(0, length).Interface().([]byte)); err != nil {
				return err
			}
			break
		}
		for j := 0; j < length; j++ {
			if err := unpack(v.Index(j), buf, endian); err != nil {
				return err
			}
		}
	}
	return nil
}
type InvalidMarshalTypeError struct {
T reflect.Type
}
func (err InvalidMarshalTypeError) Error() string {
return "The second argument of Marshal should be struct; given " + err.T.Kind().String()
}
type InvalidUnmarshalTypeError struct {
T reflect.Type
}
func (err InvalidUnmarshalTypeError) Error() string {
return "The second argument of Marshal should be struct pointer; given " + err.T.Kind().String()
}
// TypeAssertionError reports a failed type assertion inside pack.
type TypeAssertionError struct {
	totype string // name of the type the assertion targeted
}

func (err TypeAssertionError) Error() string {
	return "Type assertion failed to " + err.totype
}
// MarshalError wraps an error encountered while marshaling a specific
// struct field.
type MarshalError struct {
	E     error
	field reflect.StructField // field whose encoding failed
}

func (err MarshalError) Error() string {
	return "Error while marshaling " + err.field.Name + ": " + err.E.Error()
}
type UnmarshalError struct {
E error
field reflect.StructField
}
func (err UnmarshalError) Error() string {
return "Error while marshaling " + err.field.Name + ": " + err.E.Error()
}
Fix incorrect UnmarshalError message
package binary
import (
"bytes"
"errors"
"reflect"
"strings"
"unsafe"
)
// Marshaler is an interface implemented by types that can marshal
// themselves into binary stream data.
type Marshaler interface {
	MarshalStream(*bytes.Buffer) error
}
// Unmarshaler is an interface implemented by types that can unmarshal
// binary stream data into themselves.
// If a type is a struct, UnmarshalStream should be aware that its fields
// may not be initialized.
type Unmarshaler interface {
	UnmarshalStream(*bytes.Buffer) error
}
// Marshal encodes struct v into buf.
// If v is not a struct, Marshal returns an InvalidMarshalTypeError.
// Fields tagged `stream:"pass"` and unexported fields are skipped; the tag
// option "little" selects little-endian encoding for that field.
func Marshal(v interface{}, buf *bytes.Buffer) error {
	vv := reflect.ValueOf(v)
	t := vv.Type()
	if t.Kind() != reflect.Struct {
		return InvalidMarshalTypeError{t}
	}
	for i := 0; i < vv.NumField(); i++ {
		field := vv.Field(i)
		tag := strings.ToLower(t.Field(i).Tag.Get("stream"))
		if tag == "pass" || !field.CanInterface() {
			continue
		}
		// collect comma-separated tag options into a set
		sp := strings.Split(tag, ",")
		option := make(map[string]struct{})
		for _, v := range sp {
			option[v] = struct{}{}
		}
		fv := field.Interface()
		// types implementing Marshaler encode themselves
		if marshaler, ok := fv.(Marshaler); ok {
			if err := marshaler.MarshalStream(buf); err != nil {
				return err
			}
			continue
		}
		// big-endian unless the "little" option is present
		var endian ByteOrder = BigEndian
		if _, ok := option["little"]; ok {
			endian = LittleEndian
		}
		// a slice carries no encoded length, so it must be the final field
		if field.Kind() == reflect.Slice && i+1 != vv.NumField() {
			return MarshalError{
				errors.New("Exported slice element should be last while marshaling"),
				t.Field(i),
			}
		}
		if err := pack(field, buf, endian); err != nil {
			return MarshalError{err, t.Field(i)}
		}
	}
	return nil
}
// pack writes the binary encoding of a single value v to buf using the
// given byte order. Fixed-width numeric kinds go through the ByteOrder
// helpers, structs recurse via Marshal, and arrays/slices are encoded
// element-wise ([]byte is written verbatim).
// NOTE(review): reflect.Int and reflect.Uint (platform-sized) deliberately
// encode nothing — confirm callers never rely on these kinds. Any other
// unhandled kind (string, map, ...) also silently encodes nothing.
func pack(v reflect.Value, buf *bytes.Buffer, endian ByteOrder) error {
	kind := v.Kind()
	fv := v.Interface()
	switch kind {
	case reflect.Int, reflect.Uint:
		break // explicit break expression to clarify separated case with "bool"
	case reflect.Bool:
		fv, ok := fv.(bool)
		if !ok {
			return TypeAssertionError{"bool"}
		}
		// bools encode as a single byte: 1 for true, 0 for false
		var err error
		if fv {
			err = buf.WriteByte(1)
		} else {
			err = buf.WriteByte(0)
		}
		if err != nil {
			return err
		}
	case reflect.Int8:
		fv, ok := fv.(int8)
		if !ok {
			return TypeAssertionError{"int8"}
		}
		// reinterpret the int8 bit pattern as a byte
		if err := buf.WriteByte(*(*byte)(unsafe.Pointer(&fv))); err != nil {
			return err
		}
	case reflect.Uint8:
		fv, ok := fv.(byte)
		if !ok {
			return TypeAssertionError{"byte"}
		}
		if err := buf.WriteByte(fv); err != nil {
			return err
		}
	case reflect.Int16:
		fv, ok := fv.(int16)
		if !ok {
			return TypeAssertionError{"int16"}
		}
		b := make([]byte, 2)
		endian.PutInt16(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Uint16:
		fv, ok := fv.(uint16)
		if !ok {
			return TypeAssertionError{"uint16"}
		}
		b := make([]byte, 2)
		endian.PutUint16(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Int32:
		fv, ok := fv.(int32)
		if !ok {
			return TypeAssertionError{"int32"}
		}
		b := make([]byte, 4)
		endian.PutInt32(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Uint32:
		fv, ok := fv.(uint32)
		if !ok {
			return TypeAssertionError{"uint32"}
		}
		b := make([]byte, 4)
		endian.PutUint32(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Int64:
		fv, ok := fv.(int64)
		if !ok {
			return TypeAssertionError{"int64"}
		}
		b := make([]byte, 8)
		endian.PutInt64(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Uint64:
		fv, ok := fv.(uint64)
		if !ok {
			return TypeAssertionError{"uint64"}
		}
		b := make([]byte, 8)
		endian.PutUint64(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Float32:
		fv, ok := fv.(float32)
		if !ok {
			return TypeAssertionError{"float32"}
		}
		b := make([]byte, 4)
		endian.PutFloat32(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Float64:
		fv, ok := fv.(float64)
		if !ok {
			return TypeAssertionError{"float64"}
		}
		b := make([]byte, 8)
		endian.PutFloat64(b, fv)
		if _, err := buf.Write(b); err != nil {
			return err
		}
	case reflect.Struct:
		// nested structs follow the same per-field rules as the top level
		if err := Marshal(fv, buf); err != nil {
			return err
		}
	case reflect.Array, reflect.Slice:
		length := v.Len()
		if v.Type().Elem().Kind() == reflect.Uint8 {
			// byte sequences are written out directly
			if _, err := buf.Write(v.Slice(0, length).Interface().([]byte)); err != nil {
				return err
			}
			break
		}
		for j := 0; j < length; j++ {
			if err := pack(v.Index(j), buf, endian); err != nil {
				return err
			}
		}
	}
	return nil
}
// Unmarshal parses binary stream data from buf and stores the result in the struct pointed by v.
func Unmarshal(v interface{}, buf *bytes.Buffer) error {
vv := reflect.ValueOf(v).Elem()
t := vv.Type()
for i := 0; i < t.NumField(); i++ {
field := vv.Field(i)
tag := strings.ToLower(t.Field(i).Tag.Get("stream"))
if tag == "pass" || !field.CanSet() {
continue
}
sp := strings.Split(tag, ",")
option := make(map[string]struct{})
for _, v := range sp {
option[v] = struct{}{}
}
fv := field.Addr().Interface()
if unmarshaler, ok := fv.(Unmarshaler); ok {
if err := unmarshaler.UnmarshalStream(buf); err != nil {
return err
}
continue
}
var endian ByteOrder = BigEndian
if _, ok := option["little"]; ok {
endian = LittleEndian
}
if field.Kind() == reflect.Slice && i+1 != vv.NumField() {
return UnmarshalError{
errors.New("Exported slice element should be last while unmarshaling"),
t.Field(i),
}
}
if err := unpack(field, buf, endian); err != nil {
return UnmarshalError{err, t.Field(i)}
}
}
return nil
}
// unpack reads the binary encoding of a single value from buf into v using
// the given byte order. Structs recurse via Unmarshal; arrays/slices are
// decoded element-wise ([]byte is filled verbatim).
// NOTE(review): reflect.Int and reflect.Uint (platform-sized) deliberately
// decode nothing — confirm callers never rely on these kinds.
func unpack(v reflect.Value, buf *bytes.Buffer, endian ByteOrder) error {
	switch v.Kind() {
	case reflect.Int, reflect.Uint:
		break // explicit break expression to clarify separated case with "bool"
	case reflect.Bool:
		x, err := buf.ReadByte()
		if err != nil {
			return err
		}
		// BUG FIX: also reset to false on a 0 byte instead of leaving any
		// pre-existing value untouched
		v.SetBool(x != 0)
	case reflect.Int8:
		x, err := buf.ReadByte()
		if err != nil {
			return err
		}
		// reinterpret the byte's bit pattern as an int8
		v.SetInt(int64(*(*int8)(unsafe.Pointer(&x))))
	case reflect.Uint8:
		x, err := buf.ReadByte()
		if err != nil {
			return err
		}
		v.SetUint(uint64(x))
	case reflect.Int16:
		b := make([]byte, 2)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetInt(int64(endian.Int16(b)))
	case reflect.Uint16:
		b := make([]byte, 2)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetUint(uint64(endian.Uint16(b)))
	case reflect.Int32:
		b := make([]byte, 4)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetInt(int64(endian.Int32(b)))
	case reflect.Uint32:
		b := make([]byte, 4)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetUint(uint64(endian.Uint32(b)))
	case reflect.Int64:
		b := make([]byte, 8)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetInt(endian.Int64(b))
	case reflect.Uint64:
		b := make([]byte, 8)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetUint(endian.Uint64(b))
	case reflect.Float32:
		b := make([]byte, 4)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetFloat(float64(endian.Float32(b)))
	case reflect.Float64:
		b := make([]byte, 8)
		if err := readFull(buf, b); err != nil {
			return err
		}
		v.SetFloat(endian.Float64(b))
	case reflect.Struct:
		if err := Unmarshal(v.Addr().Interface(), buf); err != nil {
			return err
		}
	case reflect.Array, reflect.Slice:
		length := v.Len()
		if v.Type().Elem().Kind() == reflect.Uint8 {
			if err := readFull(buf, v.Slice(0, length).Interface().([]byte)); err != nil {
				return err
			}
			break
		}
		for j := 0; j < length; j++ {
			if err := unpack(v.Index(j), buf, endian); err != nil {
				return err
			}
		}
	}
	return nil
}

// readFull reads exactly len(b) bytes from buf; a short read is an error.
// BUG FIX: bytes.Buffer.Read can return fewer than len(b) bytes with a nil
// error, which previously let truncated streams decode garbage silently.
func readFull(buf *bytes.Buffer, b []byte) error {
	n, err := buf.Read(b)
	if err != nil {
		return err
	}
	if n != len(b) {
		return errors.New("unexpected end of stream")
	}
	return nil
}
type InvalidMarshalTypeError struct {
T reflect.Type
}
func (err InvalidMarshalTypeError) Error() string {
return "The second argument of Marshal should be struct; given " + err.T.Kind().String()
}
type InvalidUnmarshalTypeError struct {
T reflect.Type
}
func (err InvalidUnmarshalTypeError) Error() string {
return "The second argument of Marshal should be struct pointer; given " + err.T.Kind().String()
}
// TypeAssertionError reports a failed type assertion inside pack.
type TypeAssertionError struct {
	totype string // name of the type the assertion targeted
}

func (err TypeAssertionError) Error() string {
	return "Type assertion failed to " + err.totype
}
// MarshalError wraps an error encountered while marshaling a specific
// struct field.
type MarshalError struct {
	E     error
	field reflect.StructField // field whose encoding failed
}

func (err MarshalError) Error() string {
	return "Error while marshaling " + err.field.Name + ": " + err.E.Error()
}
// UnmarshalError wraps an error encountered while unmarshaling a specific
// struct field.
type UnmarshalError struct {
	E     error
	field reflect.StructField // field whose decoding failed
}

func (err UnmarshalError) Error() string {
	return "Error while unmarshaling " + err.field.Name + ": " + err.E.Error()
}
|
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/conformal/btcjson"
"github.com/conformal/btcutil"
"github.com/conformal/btcws"
"github.com/conformal/go-flags"
"github.com/davecgh/go-spew/spew"
"io/ioutil"
"os"
"sort"
"strconv"
)
// conversionHandler is a handler that is used to convert parameters from
// the command line to a specific type. This is needed since the btcjson
// API expects certain types for various parameters.
type conversionHandler func(string) (interface{}, error)
// displayHandler is a handler that takes an interface and displays it to
// standard out. It is used by the handler data to type assert replies and
// show them formatted as desired.
type displayHandler func(interface{}) error
// handlerData contains information about how a command should be handled.
type handlerData struct {
	requiredArgs   int            // number of arguments the command requires
	optionalArgs   int            // number of additional optional arguments
	displayHandler displayHandler // formats and prints the command's reply
	// conversionHandlers convert positional string args before the command
	// is built; a nil entry presumably leaves the arg as a string — confirm.
	conversionHandlers []conversionHandler
	makeCmd            func([]interface{}) (btcjson.Cmd, error) // builds the btcjson command from converted args
	usage              string                                   // argument usage text shown after the command name
}
// Errors used in the various handlers.
var (
	// ErrNoDisplayHandler indicates a command entry lacks a display handler.
	ErrNoDisplayHandler = errors.New("No display handler specified.")
	// ErrUsage signals that usage information should be shown instead.
	ErrUsage = errors.New("btcctl usage") // Real usage is shown.
)
// commandHandlers is a map of commands and associated handler data that is used
// to validate correctness and perform the command.
//
// Fixes relative to the previous revision:
//   - getaddressesbyaccount: the account argument is required (1 required
//     arg), so usage shows <account> rather than [account].
//   - getblock: usage now lists its two optional boolean arguments.
//   - gettransaction: usage uses the <txid> convention.
//   - signmessage: makeSignMessage only consumes two arguments, so the two
//     previously-declared optional args (which were silently ignored) are
//     no longer accepted.
var commandHandlers = map[string]*handlerData{
	"addmultisigaddress": {2, 1, displayGeneric, []conversionHandler{toInt, nil, nil}, makeAddMultiSigAddress, "<numrequired> <[\"pubkey\",...]> [account]"},
	"addnode": {2, 0, displayJSONDump, nil, makeAddNode, "<ip> <add/remove/onetry>"},
	"createencryptedwallet": {1, 0, displayGeneric, nil, makeCreateEncryptedWallet, "<passphrase>"},
	"createrawtransaction": {2, 0, displayGeneric, nil, makeCreateRawTransaction, "\"[{\"txid\":\"id\",\"vout\":n},...]\" \"{\"address\":amount,...}\""},
	"debuglevel": {1, 0, displayGeneric, nil, makeDebugLevel, "<levelspec>"},
	"decoderawtransaction": {1, 0, displayJSONDump, nil, makeDecodeRawTransaction, "<txhash>"},
	"decodescript": {1, 0, displayJSONDump, nil, makeDecodeScript, "<hex>"},
	"dumpprivkey": {1, 0, displayGeneric, nil, makeDumpPrivKey, "<bitcoinaddress>"},
	"estimatefee": {1, 0, displayGeneric, []conversionHandler{toInt64}, makeEstimateFee, "<numblocks>"},
	"estimatepriority": {1, 0, displayGeneric, []conversionHandler{toInt64}, makeEstimatePriority, "<numblocks>"},
	"getaccount": {1, 0, displayGeneric, nil, makeGetAccount, "<address>"},
	"getaccountaddress": {1, 0, displayGeneric, nil, makeGetAccountAddress, "<account>"},
	"getaddednodeinfo": {1, 1, displayJSONDump, []conversionHandler{toBool, nil}, makeGetAddedNodeInfo, "<dns> [node]"},
	"getaddressesbyaccount": {1, 0, displayJSONDump, nil, makeGetAddressesByAccount, "<account>"},
	"getbalance": {0, 2, displayGeneric, []conversionHandler{nil, toInt}, makeGetBalance, "[account] [minconf=1]"},
	"getbestblockhash": {0, 0, displayGeneric, nil, makeGetBestBlockHash, ""},
	"getblock": {1, 2, displayJSONDump, []conversionHandler{nil, toBool, toBool}, makeGetBlock, "<blockhash> [verbose] [verbosetx]"},
	"getblockchaininfo": {0, 0, displayJSONDump, nil, makeGetBlockChainInfo, ""},
	"getblockcount": {0, 0, displayGeneric, nil, makeGetBlockCount, ""},
	"getblockhash": {1, 0, displayGeneric, []conversionHandler{toInt64}, makeGetBlockHash, "<blocknumber>"},
	"getblocktemplate": {0, 1, displayJSONDump, nil, makeGetBlockTemplate, "[jsonrequestobject]"},
	"getconnectioncount": {0, 0, displayGeneric, nil, makeGetConnectionCount, ""},
	"getdifficulty": {0, 0, displayFloat64, nil, makeGetDifficulty, ""},
	"getgenerate": {0, 0, displayGeneric, nil, makeGetGenerate, ""},
	"gethashespersec": {0, 0, displayGeneric, nil, makeGetHashesPerSec, ""},
	"getinfo": {0, 0, displayJSONDump, nil, makeGetInfo, ""},
	"getnetworkhashps": {0, 2, displayGeneric, []conversionHandler{toInt, toInt}, makeGetNetworkHashPS, "[blocks height]"},
	"getnettotals": {0, 0, displayJSONDump, nil, makeGetNetTotals, ""},
	"getnewaddress": {0, 1, displayGeneric, nil, makeGetNewAddress, "[account]"},
	"getpeerinfo": {0, 0, displayJSONDump, nil, makeGetPeerInfo, ""},
	"getrawchangeaddress": {0, 0, displayGeneric, nil, makeGetRawChangeAddress, ""},
	"getrawmempool": {0, 1, displayJSONDump, []conversionHandler{toBool}, makeGetRawMempool, "[verbose=false]"},
	"getrawtransaction": {1, 1, displayJSONDump, []conversionHandler{nil, toInt}, makeGetRawTransaction, "<txhash> [verbose=0]"},
	"getreceivedbyaccount": {1, 1, displayGeneric, []conversionHandler{nil, toInt}, makeGetReceivedByAccount, "<account> [minconf=1]"},
	"getreceivedbyaddress": {1, 1, displayGeneric, []conversionHandler{nil, toInt}, makeGetReceivedByAddress, "<address> [minconf=1]"},
	"gettransaction": {1, 1, displayJSONDump, nil, makeGetTransaction, "<txid>"},
	"gettxoutsetinfo": {0, 0, displayJSONDump, nil, makeGetTxOutSetInfo, ""},
	"getwork": {0, 1, displayJSONDump, nil, makeGetWork, "[data]"},
	"help": {0, 1, displayGeneric, nil, makeHelp, "[commandName]"},
	"importprivkey": {1, 2, displayGeneric, []conversionHandler{nil, nil, toBool}, makeImportPrivKey, "<wifprivkey> [label] [rescan=true]"},
	"importwallet": {1, 0, displayGeneric, nil, makeImportWallet, "<filename>"},
	"keypoolrefill": {0, 1, displayGeneric, []conversionHandler{toInt}, makeKeyPoolRefill, "[newsize]"},
	"listaccounts": {0, 1, displayJSONDump, []conversionHandler{toInt}, makeListAccounts, "[minconf=1]"},
	"listaddressgroupings": {0, 0, displayJSONDump, nil, makeListAddressGroupings, ""},
	"listreceivedbyaccount": {0, 2, displayJSONDump, []conversionHandler{toInt, toBool}, makeListReceivedByAccount, "[minconf] [includeempty]"},
	"listreceivedbyaddress": {0, 2, displayJSONDump, []conversionHandler{toInt, toBool}, makeListReceivedByAddress, "[minconf] [includeempty]"},
	"listlockunspent": {0, 0, displayJSONDump, nil, makeListLockUnspent, ""},
	"listsinceblock": {0, 2, displayJSONDump, []conversionHandler{nil, toInt}, makeListSinceBlock, "[blockhash] [minconf=10]"},
	"listtransactions": {0, 3, displayJSONDump, []conversionHandler{nil, toInt, toInt}, makeListTransactions, "[account] [count=10] [from=0]"},
	"listunspent": {0, 3, displayJSONDump, []conversionHandler{toInt, toInt, nil}, makeListUnspent, "[minconf=1] [maxconf=9999999] [jsonaddressarray]"},
	"ping": {0, 0, displayGeneric, nil, makePing, ""},
	"sendfrom": {3, 3, displayGeneric, []conversionHandler{nil, nil, toSatoshi, toInt, nil, nil},
		makeSendFrom, "<account> <address> <amount> [minconf=1] [comment] [comment-to]"},
	"sendmany": {2, 2, displayGeneric, []conversionHandler{nil, nil, toInt, nil}, makeSendMany, "<account> <{\"address\":amount,...}> [minconf=1] [comment]"},
	"sendrawtransaction": {1, 0, displayGeneric, nil, makeSendRawTransaction, "<hextx>"},
	"sendtoaddress": {2, 2, displayGeneric, []conversionHandler{nil, toSatoshi, nil, nil}, makeSendToAddress, "<address> <amount> [comment] [comment-to]"},
	"setgenerate": {1, 1, displayGeneric, []conversionHandler{toBool, toInt}, makeSetGenerate, "<generate> [genproclimit]"},
	"settxfee": {1, 0, displayGeneric, []conversionHandler{toSatoshi}, makeSetTxFee, "<amount>"},
	"signmessage": {2, 0, displayGeneric, nil, makeSignMessage, "<address> <message>"},
	"stop": {0, 0, displayGeneric, nil, makeStop, ""},
	"submitblock": {1, 1, displayGeneric, nil, makeSubmitBlock, "<hexdata> [jsonparametersobject]"},
	"validateaddress": {1, 0, displayJSONDump, nil, makeValidateAddress, "<address>"},
	"verifychain": {0, 2, displayJSONDump, []conversionHandler{toInt, toInt}, makeVerifyChain, "[level] [numblocks]"},
	"verifymessage": {3, 0, displayGeneric, nil, makeVerifyMessage, "<address> <signature> <message>"},
	"walletlock": {0, 0, displayGeneric, nil, makeWalletLock, ""},
	"walletpassphrase": {1, 1, displayGeneric, []conversionHandler{nil, toInt64}, makeWalletPassphrase, "<passphrase> [timeout]"},
	"walletpassphrasechange": {2, 0, displayGeneric, nil, makeWalletPassphraseChange, "<oldpassphrase> <newpassphrase>"},
}
// toSatoshi attempts to convert the passed string to a satoshi amount returned
// as an int64. It returns the int64 packed into an interface so it can be used
// in the calls which expect interfaces. An error will be returned if the string
// can't be converted first to a float64.
func toSatoshi(val string) (interface{}, error) {
	btc, err := strconv.ParseFloat(val, 64)
	if err != nil {
		return nil, err
	}
	// Round to the nearest satoshi instead of truncating: float64
	// products such as 0.29 * 1e8 evaluate to 28999999.999... and a
	// plain int64 conversion would silently lose a satoshi.
	return int64(math.Floor(float64(btcutil.SatoshiPerBitcoin)*btc + 0.5)), nil
}
// toInt attempts to convert the passed string to an integer. The result is
// packed into an interface so it can be passed to the calls which expect
// interface values. A conversion failure is returned as-is from strconv.
func toInt(val string) (interface{}, error) {
	parsed, err := strconv.Atoi(val)
	if err != nil {
		return nil, err
	}
	return parsed, nil
}
// toInt64 attempts to convert the passed string to an int64. The result is
// packed into an interface so it can be passed to the calls which expect
// interface values. A conversion failure is returned as-is from strconv.
func toInt64(val string) (interface{}, error) {
	parsed, err := strconv.ParseInt(val, 10, 64)
	if err != nil {
		return nil, err
	}
	return parsed, nil
}
// toBool attempts to convert the passed string to a bool. The result is
// packed into the empty interface so it can be passed to the calls which
// accept interface values. A conversion failure is returned as-is from
// strconv.
func toBool(val string) (interface{}, error) {
	parsed, err := strconv.ParseBool(val)
	return parsed, err
}
// displayGeneric is a displayHandler that prints the reply in its default
// format followed by a newline. It never fails.
func displayGeneric(reply interface{}) error {
	fmt.Printf("%v\n", reply)
	return nil
}
// displayFloat64 is a displayHandler that ensures the concrete type of the
// passed interface is a float64 and displays it using fmt.Printf. An error
// is returned if a float64 is not passed.
func displayFloat64(reply interface{}) error {
	val, ok := reply.(float64)
	if !ok {
		return fmt.Errorf("reply type is not a float64: %v", spew.Sdump(reply))
	}
	fmt.Printf("%f\n", val)
	return nil
}
// displaySpewDump is a displayHandler that simply uses spew.Dump to display the
// passed interface.  It never returns an error.
func displaySpewDump(reply interface{}) error {
spew.Dump(reply)
return nil
}
// displayJSONDump is a displayHandler that marshals the reply to JSON,
// re-indents it with tabs via json.Indent, and prints the result.
func displayJSONDump(reply interface{}) error {
	encoded, err := json.Marshal(reply)
	if err != nil {
		return err
	}
	var indented bytes.Buffer
	if err := json.Indent(&indented, encoded, "", "\t"); err != nil {
		return err
	}
	fmt.Println(indented.String())
	return nil
}
// makeAddMultiSigAddress generates the cmd structure for addmultisigaddress
// commands.  args[1] is a JSON array of public keys; args[2], when present,
// is the optional account name.
func makeAddMultiSigAddress(args []interface{}) (btcjson.Cmd, error) {
	var keys []string
	if err := json.Unmarshal([]byte(args[1].(string)), &keys); err != nil {
		return nil, err
	}
	account := ""
	if len(args) > 2 {
		account = args[2].(string)
	}
	return btcjson.NewAddMultisigAddressCmd("btcctl", args[0].(int), keys, account)
}
// makeAddNode generates the cmd structure for addnode commands.
// args[0] is the peer address and args[1] is the add/remove/onetry verb.
func makeAddNode(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewAddNodeCmd("btcctl", args[0].(string),
args[1].(string))
}
// makeCreateEncryptedWallet generates the cmd structure for
// createencryptedwallet commands.
// args[0] is the wallet passphrase.  This constructor cannot fail.
func makeCreateEncryptedWallet(args []interface{}) (btcjson.Cmd, error) {
return btcws.NewCreateEncryptedWalletCmd("btcctl", args[0].(string)), nil
}
// makeCreateRawTransaction generates the cmd structure for createrawtransaction
// commands.  args[0] is a JSON array of transaction inputs and args[1] is a
// JSON object mapping addresses to amounts.
func makeCreateRawTransaction(args []interface{}) (btcjson.Cmd, error) {
	var txInputs []btcjson.TransactionInput
	if err := json.Unmarshal([]byte(args[0].(string)), &txInputs); err != nil {
		return nil, err
	}
	var outAmounts map[string]int64
	if err := json.Unmarshal([]byte(args[1].(string)), &outAmounts); err != nil {
		return nil, err
	}
	return btcjson.NewCreateRawTransactionCmd("btcctl", txInputs, outAmounts)
}
// makeDebugLevel generates the cmd structure for debuglevel commands.
// args[0] is the level specification string.
func makeDebugLevel(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewDebugLevelCmd("btcctl", args[0].(string))
}
// makeDecodeRawTransaction generates the cmd structure for
// decoderawtransaction commands.
// args[0] is the serialized transaction hex string.
func makeDecodeRawTransaction(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewDecodeRawTransactionCmd("btcctl", args[0].(string))
}
// makeDecodeScript generates the cmd structure for decodescript commands.
// args[0] is the script hex string.
func makeDecodeScript(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewDecodeScriptCmd("btcctl", args[0].(string))
}
// makeDumpPrivKey generates the cmd structure for
// dumpprivkey commands.
// args[0] is the bitcoin address whose private key is requested.
func makeDumpPrivKey(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewDumpPrivKeyCmd("btcctl", args[0].(string))
}
// makeEstimateFee generates the cmd structure for estimatefee commands.
// args[0] is the number of blocks, already converted to int64 by toInt64.
func makeEstimateFee(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewEstimateFeeCmd("btcctl", args[0].(int64))
}
// makeEstimatePriority generates the cmd structure for estimatepriority commands.
// args[0] is the number of blocks, already converted to int64 by toInt64.
func makeEstimatePriority(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewEstimatePriorityCmd("btcctl", args[0].(int64))
}
// makeGetAccount generates the cmd structure for
// getaccount commands.
// args[0] is the address whose account is requested.
func makeGetAccount(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetAccountCmd("btcctl", args[0].(string))
}
// makeGetAccountAddress generates the cmd structure for
// getaccountaddress commands.
// args[0] is the account name.
func makeGetAccountAddress(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetAccountAddressCmd("btcctl", args[0].(string))
}
// makeGetAddedNodeInfo generates the cmd structure for
// getaddednodeinfo commands.  (The previous comment incorrectly said
// getaccountaddress.)
func makeGetAddedNodeInfo(args []interface{}) (btcjson.Cmd, error) {
// Create the getaddednodeinfo command with defaults for the optional
// parameters.
cmd, err := btcjson.NewGetAddedNodeInfoCmd("btcctl", args[0].(bool))
if err != nil {
return nil, err
}
// Override the optional parameter if it was specified.
if len(args) > 1 {
cmd.Node = args[1].(string)
}
return cmd, nil
}
// makeGetAddressesByAccount generates the cmd structure for
// getaddressesbyaccount commands.
// args[0] is the account name.
func makeGetAddressesByAccount(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetAddressesByAccountCmd("btcctl", args[0].(string))
}
// makeGetBalance generates the cmd structure for getbalance commands.
// Both arguments are optional: args[0] is the account and args[1] the
// minimum confirmation count.
func makeGetBalance(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 2)
	if len(args) > 0 {
		extra = append(extra, args[0].(string))
	}
	if len(args) > 1 {
		extra = append(extra, args[1].(int))
	}
	return btcjson.NewGetBalanceCmd("btcctl", extra...)
}
// makeGetBestBlockHash generates the cmd structure for
// getbestblockhash commands.  (The previous comment had a typo:
// "makebestblockhash".)
func makeGetBestBlockHash(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetBestBlockHashCmd("btcctl")
}
// makeGetBlock generates the cmd structure for getblock commands.
// args[0] is the block hash; args[1]/args[2] optionally override the
// command's verbose and verbosetx flags.
func makeGetBlock(args []interface{}) (btcjson.Cmd, error) {
	cmd, err := btcjson.NewGetBlockCmd("btcctl", args[0].(string))
	if err != nil {
		return nil, err
	}
	// Apply whichever optional flags were supplied.
	if len(args) > 1 {
		cmd.Verbose = args[1].(bool)
	}
	if len(args) > 2 {
		cmd.VerboseTx = args[2].(bool)
	}
	return cmd, nil
}
// makeGetBlockChainInfo generates the cmd structure for getblockchaininfo commands.
// The command takes no arguments.
func makeGetBlockChainInfo(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetBlockChainInfoCmd("btcctl")
}
// makeGetBlockCount generates the cmd structure for getblockcount commands.
// The command takes no arguments.
func makeGetBlockCount(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetBlockCountCmd("btcctl")
}
// makeGetBlockHash generates the cmd structure for getblockhash commands.
// args[0] is the block height, already converted to int64 by toInt64.
func makeGetBlockHash(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetBlockHashCmd("btcctl", args[0].(int64))
}
// makeGetBlockTemplate generates the cmd structure for getblocktemplate
// commands.  When the optional JSON request object is supplied, it is
// unmarshaled directly into the command.
func makeGetBlockTemplate(args []interface{}) (btcjson.Cmd, error) {
	cmd, err := btcjson.NewGetBlockTemplateCmd("btcctl")
	if err != nil {
		return nil, err
	}
	if len(args) > 0 {
		if err := cmd.UnmarshalJSON([]byte(args[0].(string))); err != nil {
			return nil, err
		}
	}
	return cmd, nil
}
// makeGetConnectionCount generates the cmd structure for
// getconnectioncount commands.
// The command takes no arguments.
func makeGetConnectionCount(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetConnectionCountCmd("btcctl")
}
// makeGetDifficulty generates the cmd structure for
// getdifficulty commands.
// The command takes no arguments.
func makeGetDifficulty(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetDifficultyCmd("btcctl")
}
// makeGetGenerate generates the cmd structure for
// getgenerate commands.
// The command takes no arguments.
func makeGetGenerate(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetGenerateCmd("btcctl")
}
// makeGetHashesPerSec generates the cmd structure for gethashespersec commands.
// The command takes no arguments.
func makeGetHashesPerSec(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetHashesPerSecCmd("btcctl")
}
// makeGetInfo generates the cmd structure for getinfo commands.
// The command takes no arguments.
func makeGetInfo(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetInfoCmd("btcctl")
}
// makeGetNetworkHashPS generates the cmd structure for getnetworkhashps
// commands.  Both arguments are optional and already converted to int by
// toInt.
func makeGetNetworkHashPS(args []interface{}) (btcjson.Cmd, error) {
// Create the getnetworkhashps command with defaults for the optional
// parameters.
cmd, err := btcjson.NewGetNetworkHashPSCmd("btcctl")
if err != nil {
return nil, err
}
// Override the optional blocks if specified.
if len(args) > 0 {
cmd.Blocks = args[0].(int)
}
// Override the optional height if specified.
if len(args) > 1 {
cmd.Height = args[1].(int)
}
return cmd, nil
}
// makeGetNetTotals generates the cmd structure for getnettotals commands.
// The command takes no arguments.
func makeGetNetTotals(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetNetTotalsCmd("btcctl")
}
// makeGetNewAddress generates the cmd structure for getnewaddress commands.
// The optional args[0] names the account; it defaults to the empty string.
func makeGetNewAddress(args []interface{}) (btcjson.Cmd, error) {
	account := ""
	if len(args) > 0 {
		account = args[0].(string)
	}
	return btcjson.NewGetNewAddressCmd("btcctl", account)
}
// makeGetPeerInfo generates the cmd structure for
// getpeerinfo commands.  (The previous comment misnamed the function as
// makePeerInfo.)
func makeGetPeerInfo(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetPeerInfoCmd("btcctl")
}
// makeGetRawChangeAddress generates the cmd structure for getrawchangeaddress commands.
// The command takes no arguments.
func makeGetRawChangeAddress(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetRawChangeAddressCmd("btcctl")
}
// makeGetRawMempool generates the cmd structure for getrawmempool commands.
// The optional args[0] is the verbose flag.
func makeGetRawMempool(args []interface{}) (btcjson.Cmd, error) {
	var verbose []bool
	if len(args) > 0 {
		verbose = []bool{args[0].(bool)}
	}
	return btcjson.NewGetRawMempoolCmd("btcctl", verbose...)
}
// makeGetReceivedByAccount generates the cmd structure for
// getreceivedbyaccount commands.  args[0] is the account; the optional
// args[1] is the minimum confirmation count.
func makeGetReceivedByAccount(args []interface{}) (btcjson.Cmd, error) {
	var minConf []int
	if len(args) > 1 {
		minConf = []int{args[1].(int)}
	}
	return btcjson.NewGetReceivedByAccountCmd("btcctl", args[0].(string), minConf...)
}
// makeGetReceivedByAddress generates the cmd structure for
// getreceivedbyaddress commands.  args[0] is the address; the optional
// args[1] is the minimum confirmation count.
func makeGetReceivedByAddress(args []interface{}) (btcjson.Cmd, error) {
	var minConf []int
	if len(args) > 1 {
		minConf = []int{args[1].(int)}
	}
	return btcjson.NewGetReceivedByAddressCmd("btcctl", args[0].(string), minConf...)
}
// makeGetTransaction generates the cmd structure for gettransaction commands.
// args[0] is the transaction id.
func makeGetTransaction(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetTransactionCmd("btcctl", args[0].(string))
}
// makeGetTxOutSetInfo generates the cmd structure for gettxoutsetinfo commands.
// The command takes no arguments.
func makeGetTxOutSetInfo(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewGetTxOutSetInfoCmd("btcctl")
}
// makeGetWork generates the cmd structure for getwork commands.
// The optional args[0] supplies the data field.
func makeGetWork(args []interface{}) (btcjson.Cmd, error) {
	cmd, err := btcjson.NewGetWorkCmd("btcctl")
	if err != nil {
		return nil, err
	}
	if len(args) > 0 {
		cmd.Data = args[0].(string)
	}
	return cmd, nil
}
// makeHelp generates the cmd structure for help commands.
// The optional args[0] names the command to get help for.
func makeHelp(args []interface{}) (btcjson.Cmd, error) {
	var topic []string
	if len(args) > 0 {
		topic = []string{args[0].(string)}
	}
	return btcjson.NewHelpCmd("btcctl", topic...)
}
// makeGetRawTransaction generates the cmd structure for getrawtransaction
// commands.  args[0] is the transaction hash; the optional args[1] is the
// verbose flag as an int.
func makeGetRawTransaction(args []interface{}) (btcjson.Cmd, error) {
	var verbose []int
	if len(args) > 1 {
		verbose = []int{args[1].(int)}
	}
	return btcjson.NewGetRawTransactionCmd("btcctl", args[0].(string), verbose...)
}
// makeImportPrivKey generates the cmd structure for importprivkey commands.
// args[0] is the WIF private key; optional args[1]/args[2] are the label and
// rescan flag.
func makeImportPrivKey(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 2)
	if len(args) > 1 {
		extra = append(extra, args[1].(string))
	}
	if len(args) > 2 {
		extra = append(extra, args[2].(bool))
	}
	return btcjson.NewImportPrivKeyCmd("btcctl", args[0].(string), extra...)
}
// makeImportWallet generates the cmd structure for
// importwallet commands.
// args[0] is the wallet file name.
func makeImportWallet(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewImportWalletCmd("btcctl", args[0].(string))
}
// makeKeyPoolRefill generates the cmd structure for keypoolrefill commands.
// The optional args[0] is the new pool size (converted from int to uint).
func makeKeyPoolRefill(args []interface{}) (btcjson.Cmd, error) {
	var newSize []uint
	if len(args) > 0 {
		newSize = []uint{uint(args[0].(int))}
	}
	return btcjson.NewKeyPoolRefillCmd("btcctl", newSize...)
}
// makeListAccounts generates the cmd structure for listaccounts commands.
// The optional args[0] is the minimum confirmation count.
func makeListAccounts(args []interface{}) (btcjson.Cmd, error) {
	var minConf []int
	if len(args) > 0 {
		minConf = []int{args[0].(int)}
	}
	return btcjson.NewListAccountsCmd("btcctl", minConf...)
}
// makeListAddressGroupings generates the cmd structure for listaddressgroupings commands.
// The command takes no arguments.
func makeListAddressGroupings(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewListAddressGroupingsCmd("btcctl")
}
// makeListReceivedByAccount generates the cmd structure for
// listreceivedbyaccount commands.  Optional args[0]/args[1] are the minimum
// confirmation count and include-empty flag.
func makeListReceivedByAccount(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 2)
	if len(args) > 0 {
		extra = append(extra, args[0].(int))
	}
	if len(args) > 1 {
		extra = append(extra, args[1].(bool))
	}
	return btcjson.NewListReceivedByAccountCmd("btcctl", extra...)
}
// makeListReceivedByAddress generates the cmd structure for
// listreceivedbyaddress commands.  Optional args[0]/args[1] are the minimum
// confirmation count and include-empty flag.
func makeListReceivedByAddress(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 2)
	if len(args) > 0 {
		extra = append(extra, args[0].(int))
	}
	if len(args) > 1 {
		extra = append(extra, args[1].(bool))
	}
	return btcjson.NewListReceivedByAddressCmd("btcctl", extra...)
}
// makeListLockUnspent generates the cmd structure for listlockunspent commands.
// The command takes no arguments.
func makeListLockUnspent(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewListLockUnspentCmd("btcctl")
}
// makeListSinceBlock generates the cmd structure for listsinceblock
// commands.  Optional args[0]/args[1] are the block hash and minimum
// confirmation count.
func makeListSinceBlock(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 2)
	if len(args) > 0 {
		extra = append(extra, args[0].(string))
	}
	if len(args) > 1 {
		extra = append(extra, args[1].(int))
	}
	return btcjson.NewListSinceBlockCmd("btcctl", extra...)
}
// makeListTransactions generates the cmd structure for listtransactions
// commands.  Optional args[0]/args[1]/args[2] are the account, count, and
// starting offset.
func makeListTransactions(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 3)
	if len(args) > 0 {
		extra = append(extra, args[0].(string))
	}
	if len(args) > 1 {
		extra = append(extra, args[1].(int))
	}
	if len(args) > 2 {
		extra = append(extra, args[2].(int))
	}
	return btcjson.NewListTransactionsCmd("btcctl", extra...)
}
// makeListUnspent generates the cmd structure for listunspent commands.
// Optional args[0]/args[1] are minconf/maxconf; optional args[2] is a JSON
// array of addresses.
func makeListUnspent(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 3)
	if len(args) > 0 {
		extra = append(extra, args[0].(int))
	}
	if len(args) > 1 {
		extra = append(extra, args[1].(int))
	}
	if len(args) > 2 {
		var addresses []string
		if err := json.Unmarshal([]byte(args[2].(string)), &addresses); err != nil {
			return nil, err
		}
		extra = append(extra, addresses)
	}
	return btcjson.NewListUnspentCmd("btcctl", extra...)
}
// makePing generates the cmd structure for ping commands.
// The command takes no arguments.
func makePing(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewPingCmd("btcctl")
}
// makeSendFrom generates the cmd structure for sendfrom commands.
// args[0..2] are the account, destination address, and satoshi amount;
// optional args[3..5] are minconf, comment, and comment-to.
func makeSendFrom(args []interface{}) (btcjson.Cmd, error) {
	extra := make([]interface{}, 0, 3)
	if len(args) > 3 {
		extra = append(extra, args[3].(int))
	}
	if len(args) > 4 {
		extra = append(extra, args[4].(string))
	}
	if len(args) > 5 {
		extra = append(extra, args[5].(string))
	}
	return btcjson.NewSendFromCmd("btcctl", args[0].(string),
		args[1].(string), args[2].(int64), extra...)
}
// makeSendMany generates the cmd structure for sendmany commands.
// args[0] is the source account; args[1] is a JSON object mapping addresses
// to BTC amounts; optional args[2]/args[3] are minconf and a comment.
func makeSendMany(args []interface{}) (btcjson.Cmd, error) {
	origPairs := make(map[string]float64)
	err := json.Unmarshal([]byte(args[1].(string)), &origPairs)
	if err != nil {
		return nil, err
	}
	// Convert each BTC amount to satoshi, rounding to the nearest
	// integer.  Plain truncation would drop a satoshi for amounts such
	// as 0.29 whose float64 product with 1e8 lands just below the
	// intended value.
	pairs := make(map[string]int64, len(origPairs))
	for addr, value := range origPairs {
		pairs[addr] = int64(math.Floor(float64(btcutil.SatoshiPerBitcoin)*value + 0.5))
	}
	var optargs = make([]interface{}, 0, 2)
	if len(args) > 2 {
		optargs = append(optargs, args[2].(int))
	}
	if len(args) > 3 {
		optargs = append(optargs, args[3].(string))
	}
	return btcjson.NewSendManyCmd("btcctl", args[0].(string), pairs, optargs...)
}
// makeSendRawTransaction generates the cmd structure for sendrawtransaction
// commands.
// args[0] is the serialized transaction hex string.
func makeSendRawTransaction(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewSendRawTransactionCmd("btcctl", args[0].(string))
}
// makeSendToAddress generates the cmd structure for sendtoaddress commands.
// args[2:] carries the optional comment and comment-to values.  (Fixed the
// "struture" typo in the previous comment.)
func makeSendToAddress(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewSendToAddressCmd("btcctl", args[0].(string), args[1].(int64), args[2:]...)
}
// makeSetGenerate generates the cmd structure for setgenerate commands.
// args[0] is the generate flag; the optional args[1] is genproclimit.
func makeSetGenerate(args []interface{}) (btcjson.Cmd, error) {
	var genProcLimit []int
	if len(args) > 1 {
		genProcLimit = []int{args[1].(int)}
	}
	return btcjson.NewSetGenerateCmd("btcctl", args[0].(bool), genProcLimit...)
}
// makeSetTxFee generates the cmd structure for settxfee commands.
// args[0] is the fee in satoshi, already converted by toSatoshi.
func makeSetTxFee(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewSetTxFeeCmd("btcctl", args[0].(int64))
}
// makeSignMessage generates the cmd structure for signmessage commands.
// args[0] is the signing address and args[1] is the message.
func makeSignMessage(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewSignMessageCmd("btcctl", args[0].(string),
args[1].(string))
}
// makeStop generates the cmd structure for stop commands.
// The command takes no arguments.
func makeStop(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewStopCmd("btcctl")
}
// makeSubmitBlock generates the cmd structure for submitblock commands.
// args[0] is the hex block data; the optional args[1] supplies the work id.
func makeSubmitBlock(args []interface{}) (btcjson.Cmd, error) {
	opts := &btcjson.SubmitBlockOptions{}
	if len(args) > 1 {
		opts.WorkId = args[1].(string)
	}
	return btcjson.NewSubmitBlockCmd("btcctl", args[0].(string), opts)
}
// makeValidateAddress generates the cmd structure for validateaddress commands.
// args[0] is the address to validate.
func makeValidateAddress(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewValidateAddressCmd("btcctl", args[0].(string))
}
// makeVerifyChain generates the cmd structure for verifychain commands.
// All supplied optional args are passed through as int32 values.
func makeVerifyChain(args []interface{}) (btcjson.Cmd, error) {
	converted := make([]int32, 0, len(args))
	for _, arg := range args {
		converted = append(converted, int32(arg.(int)))
	}
	return btcjson.NewVerifyChainCmd("btcctl", converted...)
}
// makeVerifyMessage generates the cmd structure for verifymessage commands.
// args[0..2] are the address, signature, and message.
func makeVerifyMessage(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewVerifyMessageCmd("btcctl", args[0].(string),
args[1].(string), args[2].(string))
}
// makeWalletLock generates the cmd structure for walletlock commands.
// The command takes no arguments.
func makeWalletLock(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewWalletLockCmd("btcctl")
}
// makeWalletPassphrase generates the cmd structure for walletpassphrase commands.
// args[0] is the passphrase; the optional args[1] is the unlock timeout.
func makeWalletPassphrase(args []interface{}) (btcjson.Cmd, error) {
// Default the unlock timeout to 60 seconds when it is not supplied.
timeout := int64(60)
if len(args) > 1 {
timeout = args[1].(int64)
}
return btcjson.NewWalletPassphraseCmd("btcctl", args[0].(string), timeout)
}
// makeWalletPassphraseChange generates the cmd structure for
// walletpassphrasechange commands.
// args[0] is the old passphrase and args[1] is the new one.
func makeWalletPassphraseChange(args []interface{}) (btcjson.Cmd, error) {
return btcjson.NewWalletPassphraseChangeCmd("btcctl", args[0].(string),
args[1].(string))
}
// send sends a JSON-RPC command to the specified RPC server and examines the
// results for various error conditions. It either returns a valid result or
// an appropriate error.
func send(cfg *config, msg []byte) (interface{}, error) {
var reply btcjson.Reply
var err error
// Plain HTTP is used when TLS is disabled outright, or when no
// certificate is configured and certificate verification is not being
// skipped.
if cfg.NoTls || (cfg.RPCCert == "" && !cfg.TlsSkipVerify) {
reply, err = btcjson.RpcCommand(cfg.RPCUser, cfg.RPCPassword,
cfg.RPCServer, msg)
} else {
// Load the PEM certificate used to authenticate the server when
// one was configured; a missing path leaves pem nil.
var pem []byte
if cfg.RPCCert != "" {
pem, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return nil, err
}
}
reply, err = btcjson.TlsRpcCommand(cfg.RPCUser,
cfg.RPCPassword, cfg.RPCServer, msg, pem,
cfg.TlsSkipVerify)
}
if err != nil {
return nil, err
}
// A reply-level error means the server received the request but the
// command itself failed.
if reply.Error != nil {
return nil, reply.Error
}
return reply.Result, nil
}
// sendCommand creates a JSON-RPC command using the passed command and arguments
// and then sends it.  A prefix is added to any errors that occur indicating
// what step failed.
func sendCommand(cfg *config, command btcjson.Cmd) (interface{}, error) {
	msg, err := json.Marshal(command)
	if err != nil {
		// %v formats errors directly; the previous err.Error() call
		// was redundant.
		return nil, fmt.Errorf("CreateMessage: %v", err)
	}

	reply, err := send(cfg, msg)
	if err != nil {
		return nil, fmt.Errorf("RpcCommand: %v", err)
	}
	return reply, nil
}
// commandHandler handles commands provided via the cli using the specific
// handler data to instruct the handler what to do.  It validates the
// argument count, converts the arguments per the handler data, builds the
// JSON-RPC command, sends it, and displays the reply.
func commandHandler(cfg *config, command string, data *handlerData, args []string) error {
	// Ensure the number of arguments are the expected value.
	if len(args) < data.requiredArgs {
		return ErrUsage
	}
	if len(args) > data.requiredArgs+data.optionalArgs {
		return ErrUsage
	}

	// Ensure there is a display handler.
	if data.displayHandler == nil {
		return ErrNoDisplayHandler
	}

	// Ensure the number of conversion handlers is valid if any are
	// specified.  (errors.New replaces the previous fmt.Errorf with no
	// format verbs, and the message follows Go error-string conventions.)
	convHandlers := data.conversionHandlers
	if convHandlers != nil && len(convHandlers) < len(args) {
		return errors.New("the number of conversion handlers is invalid")
	}

	// Convert input parameters per the conversion handlers in a single
	// pass, keeping the raw string where no handler is specified.
	iargs := make([]interface{}, len(args))
	for i, arg := range args {
		iargs[i] = arg
		if convHandlers == nil || convHandlers[i] == nil {
			continue
		}
		convertedArg, err := convHandlers[i](arg)
		if err != nil {
			return err
		}
		iargs[i] = convertedArg
	}
	cmd, err := data.makeCmd(iargs)
	if err != nil {
		return err
	}

	// Create and send the appropriate JSON-RPC command.
	reply, err := sendCommand(cfg, cmd)
	if err != nil {
		return err
	}

	// Display the results of the JSON-RPC command using the provided
	// display handler.
	if reply != nil {
		if err := data.displayHandler(reply); err != nil {
			return err
		}
	}
	return nil
}
// usage displays the command usage: the flag help followed by the sorted
// per-command usage lines derived from commandHandlers.
func usage(parser *flags.Parser) {
	parser.WriteHelp(os.Stderr)

	// Extract usage information for each command from the command handler
	// data and sort by command name.
	fmt.Fprintf(os.Stderr, "\nCommands:\n")
	usageStrings := make([]string, 0, len(commandHandlers))
	for command, data := range commandHandlers {
		usage := command
		if len(data.usage) > 0 {
			usage += " " + data.usage
		}
		usageStrings = append(usageStrings, usage)
	}
	// sort.Strings is the idiomatic shorthand for
	// sort.Sort(sort.StringSlice(...)).
	sort.Strings(usageStrings)
	for _, usage := range usageStrings {
		fmt.Fprintf(os.Stderr, "\t%s\n", usage)
	}
}
// main loads the configuration, validates the requested command against the
// handler table, executes it, and exits with a non-zero status on failure.
func main() {
parser, cfg, args, err := loadConfig()
if err != nil {
usage(parser)
os.Exit(1)
}
if len(args) < 1 {
usage(parser)
return
}
// Display usage if the command is not supported.
data, exists := commandHandlers[args[0]]
if !exists {
fmt.Fprintf(os.Stderr, "Unrecognized command: %s\n", args[0])
usage(parser)
os.Exit(1)
}
// Execute the command.
err = commandHandler(cfg, args[0], data, args[1:])
if err != nil {
// ErrUsage means the argument count was wrong; show the usage
// text rather than the sentinel's message.
if err == ErrUsage {
usage(parser)
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
Add getmininginfo to btcctl.
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/conformal/btcjson"
"github.com/conformal/btcutil"
"github.com/conformal/btcws"
"github.com/conformal/go-flags"
"github.com/davecgh/go-spew/spew"
"io/ioutil"
"os"
"sort"
"strconv"
)
// conversionHandler is a handler that is used to convert parameters from the
// command line to a specific type. This is needed since the btcjson API
// expects certain types for various parameters.
// A nil conversionHandler entry leaves the argument as its raw string form.
type conversionHandler func(string) (interface{}, error)
// displayHandler is a handler that takes an interface and displays it to
// standard out. It is used by the handler data to type assert replies and
// show them formatted as desired.
// A non-nil error is returned when the reply cannot be displayed.
type displayHandler func(interface{}) error
// handlerData contains information about how a command should be handled.
type handlerData struct {
requiredArgs int // number of arguments that must be supplied
optionalArgs int // number of additional arguments that may be supplied
displayHandler displayHandler // formats and prints the RPC reply
conversionHandlers []conversionHandler // per-argument converters; nil entry keeps the raw string
makeCmd func([]interface{}) (btcjson.Cmd, error) // builds the command from converted args
usage string // argument usage text shown in help output
}
// Errors used in the various handlers.
var (
	// ErrNoDisplayHandler indicates a commandHandlers entry without a
	// display handler; it is a programming error in the table.
	// Error strings are lowercase without trailing punctuation per Go
	// convention; both sentinels are compared by identity, so changing
	// the text is safe.
	ErrNoDisplayHandler = errors.New("no display handler specified")
	// ErrUsage is a sentinel returned when arguments don't match the
	// command; callers print the real usage text instead of this message.
	ErrUsage = errors.New("btcctl usage")
)
// commandHandlers is a map of commands and associated handler data that is used
// to validate correctness and perform the command.
//
// Each entry is: {requiredArgs, optionalArgs, displayHandler,
// conversionHandlers (nil slice or nil entry = leave the argument as a raw
// string), makeCmd constructor, usage synopsis}.
var commandHandlers = map[string]*handlerData{
	"addmultisigaddress": {2, 1, displayGeneric, []conversionHandler{toInt, nil, nil}, makeAddMultiSigAddress, "<numrequired> <[\"pubkey\",...]> [account]"},
	"addnode": {2, 0, displayJSONDump, nil, makeAddNode, "<ip> <add/remove/onetry>"},
	"createencryptedwallet": {1, 0, displayGeneric, nil, makeCreateEncryptedWallet, "<passphrase>"},
	"createrawtransaction": {2, 0, displayGeneric, nil, makeCreateRawTransaction, "\"[{\"txid\":\"id\",\"vout\":n},...]\" \"{\"address\":amount,...}\""},
	"debuglevel": {1, 0, displayGeneric, nil, makeDebugLevel, "<levelspec>"},
	"decoderawtransaction": {1, 0, displayJSONDump, nil, makeDecodeRawTransaction, "<txhash>"},
	"decodescript": {1, 0, displayJSONDump, nil, makeDecodeScript, "<hex>"},
	"dumpprivkey": {1, 0, displayGeneric, nil, makeDumpPrivKey, "<bitcoinaddress>"},
	"estimatefee": {1, 0, displayGeneric, []conversionHandler{toInt64}, makeEstimateFee, "<numblocks>"},
	"estimatepriority": {1, 0, displayGeneric, []conversionHandler{toInt64}, makeEstimatePriority, "<numblocks>"},
	"getaccount": {1, 0, displayGeneric, nil, makeGetAccount, "<address>"},
	"getaccountaddress": {1, 0, displayGeneric, nil, makeGetAccountAddress, "<account>"},
	"getaddednodeinfo": {1, 1, displayJSONDump, []conversionHandler{toBool, nil}, makeGetAddedNodeInfo, "<dns> [node]"},
	"getaddressesbyaccount": {1, 0, displayJSONDump, nil, makeGetAddressesByAccount, "[account]"},
	"getbalance": {0, 2, displayGeneric, []conversionHandler{nil, toInt}, makeGetBalance, "[account] [minconf=1]"},
	"getbestblockhash": {0, 0, displayGeneric, nil, makeGetBestBlockHash, ""},
	"getblock": {1, 2, displayJSONDump, []conversionHandler{nil, toBool, toBool}, makeGetBlock, "<blockhash>"},
	"getblockchaininfo": {0, 0, displayJSONDump, nil, makeGetBlockChainInfo, ""},
	"getblockcount": {0, 0, displayGeneric, nil, makeGetBlockCount, ""},
	"getblockhash": {1, 0, displayGeneric, []conversionHandler{toInt64}, makeGetBlockHash, "<blocknumber>"},
	"getblocktemplate": {0, 1, displayJSONDump, nil, makeGetBlockTemplate, "[jsonrequestobject]"},
	"getconnectioncount": {0, 0, displayGeneric, nil, makeGetConnectionCount, ""},
	"getdifficulty": {0, 0, displayFloat64, nil, makeGetDifficulty, ""},
	"getgenerate": {0, 0, displayGeneric, nil, makeGetGenerate, ""},
	"gethashespersec": {0, 0, displayGeneric, nil, makeGetHashesPerSec, ""},
	"getinfo": {0, 0, displayJSONDump, nil, makeGetInfo, ""},
	"getmininginfo": {0, 0, displayJSONDump, nil, makeGetMiningInfo, ""},
	"getnetworkhashps": {0, 2, displayGeneric, []conversionHandler{toInt, toInt}, makeGetNetworkHashPS, "[blocks height]"},
	"getnettotals": {0, 0, displayJSONDump, nil, makeGetNetTotals, ""},
	"getnewaddress": {0, 1, displayGeneric, nil, makeGetNewAddress, "[account]"},
	"getpeerinfo": {0, 0, displayJSONDump, nil, makeGetPeerInfo, ""},
	"getrawchangeaddress": {0, 0, displayGeneric, nil, makeGetRawChangeAddress, ""},
	"getrawmempool": {0, 1, displayJSONDump, []conversionHandler{toBool}, makeGetRawMempool, "[verbose=false]"},
	"getrawtransaction": {1, 1, displayJSONDump, []conversionHandler{nil, toInt}, makeGetRawTransaction, "<txhash> [verbose=0]"},
	"getreceivedbyaccount": {1, 1, displayGeneric, []conversionHandler{nil, toInt}, makeGetReceivedByAccount, "<account> [minconf=1]"},
	"getreceivedbyaddress": {1, 1, displayGeneric, []conversionHandler{nil, toInt}, makeGetReceivedByAddress, "<address> [minconf=1]"},
	"gettransaction": {1, 1, displayJSONDump, nil, makeGetTransaction, "txid"},
	"gettxoutsetinfo": {0, 0, displayJSONDump, nil, makeGetTxOutSetInfo, ""},
	"getwork": {0, 1, displayJSONDump, nil, makeGetWork, "[data]"},
	"help": {0, 1, displayGeneric, nil, makeHelp, "[commandName]"},
	"importprivkey": {1, 2, displayGeneric, []conversionHandler{nil, nil, toBool}, makeImportPrivKey, "<wifprivkey> [label] [rescan=true]"},
	"importwallet": {1, 0, displayGeneric, nil, makeImportWallet, "<filename>"},
	"keypoolrefill": {0, 1, displayGeneric, []conversionHandler{toInt}, makeKeyPoolRefill, "[newsize]"},
	"listaccounts": {0, 1, displayJSONDump, []conversionHandler{toInt}, makeListAccounts, "[minconf=1]"},
	"listaddressgroupings": {0, 0, displayJSONDump, nil, makeListAddressGroupings, ""},
	"listreceivedbyaccount": {0, 2, displayJSONDump, []conversionHandler{toInt, toBool}, makeListReceivedByAccount, "[minconf] [includeempty]"},
	"listreceivedbyaddress": {0, 2, displayJSONDump, []conversionHandler{toInt, toBool}, makeListReceivedByAddress, "[minconf] [includeempty]"},
	"listlockunspent": {0, 0, displayJSONDump, nil, makeListLockUnspent, ""},
	"listsinceblock": {0, 2, displayJSONDump, []conversionHandler{nil, toInt}, makeListSinceBlock, "[blockhash] [minconf=10]"},
	"listtransactions": {0, 3, displayJSONDump, []conversionHandler{nil, toInt, toInt}, makeListTransactions, "[account] [count=10] [from=0]"},
	"listunspent": {0, 3, displayJSONDump, []conversionHandler{toInt, toInt, nil}, makeListUnspent, "[minconf=1] [maxconf=9999999] [jsonaddressarray]"},
	"ping": {0, 0, displayGeneric, nil, makePing, ""},
	"sendfrom": {3, 3, displayGeneric, []conversionHandler{nil, nil, toSatoshi, toInt, nil, nil},
		makeSendFrom, "<account> <address> <amount> [minconf=1] [comment] [comment-to]"},
	"sendmany": {2, 2, displayGeneric, []conversionHandler{nil, nil, toInt, nil}, makeSendMany, "<account> <{\"address\":amount,...}> [minconf=1] [comment]"},
	"sendrawtransaction": {1, 0, displayGeneric, nil, makeSendRawTransaction, "<hextx>"},
	"sendtoaddress": {2, 2, displayGeneric, []conversionHandler{nil, toSatoshi, nil, nil}, makeSendToAddress, "<address> <amount> [comment] [comment-to]"},
	"setgenerate": {1, 1, displayGeneric, []conversionHandler{toBool, toInt}, makeSetGenerate, "<generate> [genproclimit]"},
	"settxfee": {1, 0, displayGeneric, []conversionHandler{toSatoshi}, makeSetTxFee, "<amount>"},
	"signmessage": {2, 2, displayGeneric, nil, makeSignMessage, "<address> <message>"},
	"stop": {0, 0, displayGeneric, nil, makeStop, ""},
	"submitblock": {1, 1, displayGeneric, nil, makeSubmitBlock, "<hexdata> [jsonparametersobject]"},
	"validateaddress": {1, 0, displayJSONDump, nil, makeValidateAddress, "<address>"},
	"verifychain": {0, 2, displayJSONDump, []conversionHandler{toInt, toInt}, makeVerifyChain, "[level] [numblocks]"},
	"verifymessage": {3, 0, displayGeneric, nil, makeVerifyMessage, "<address> <signature> <message>"},
	"walletlock": {0, 0, displayGeneric, nil, makeWalletLock, ""},
	"walletpassphrase": {1, 1, displayGeneric, []conversionHandler{nil, toInt64}, makeWalletPassphrase, "<passphrase> [timeout]"},
	"walletpassphrasechange": {2, 0, displayGeneric, nil, makeWalletPassphraseChange, "<oldpassphrase> <newpassphrase>"},
}
// toSatoshi attempts to convert the passed string to a satoshi amount returned
// as an int64.  It returns the int64 packed into an interface so it can be used
// in the calls which expect interfaces.  An error will be returned if the string
// can't be converted first to a float64.
func toSatoshi(val string) (interface{}, error) {
	btc, err := strconv.ParseFloat(val, 64)
	if err != nil {
		return nil, err
	}
	// Round to the nearest satoshi rather than truncating.  Bare int64()
	// truncation loses a satoshi for amounts such as 0.29 BTC, whose
	// closest float64 representation is fractionally below the exact
	// value (0.29 * 1e8 = 28999999.999999996).
	amt := float64(btcutil.SatoshiPerBitcoin) * btc
	if amt < 0 {
		return int64(amt - 0.5), nil
	}
	return int64(amt + 0.5), nil
}
// toInt attempts to convert the passed string to an integer.  It returns the
// integer packed into an interface so it can be used in the calls which expect
// interfaces.  An error will be returned if the string can't be converted to an
// integer.
func toInt(val string) (interface{}, error) {
	num, err := strconv.Atoi(val)
	if err == nil {
		return num, nil
	}
	return nil, err
}
// toInt64 attempts to convert the passed string to an int64.  It returns the
// integer packed into an interface so it can be used in the calls which expect
// interfaces.  An error will be returned if the string can't be converted to an
// integer.
func toInt64(val string) (interface{}, error) {
	num, err := strconv.ParseInt(val, 10, 64)
	if err == nil {
		return num, nil
	}
	return nil, err
}
// toBool attempts to convert the passed string to a bool.  It returns the
// bool packed into the empty interface so it can be used in the calls which
// accept interfaces.  An error will be returned if the string can't be
// converted to a bool.
func toBool(val string) (interface{}, error) {
	// Return the parsed value directly; on failure the zero bool is
	// returned alongside the error, matching strconv.ParseBool itself.
	parsed, err := strconv.ParseBool(val)
	return parsed, err
}
// displayGeneric is a displayHandler that simply prints the passed interface
// in its default %v formatting followed by a newline.  It never fails.
func displayGeneric(reply interface{}) error {
	// %v\n for a single operand is identical to fmt.Println(reply).
	fmt.Printf("%v\n", reply)
	return nil
}
// displayFloat64 is a displayHandler that ensures the concrete type of the
// passed interface is a float64 and displays it using fmt.Printf.  An error
// is returned if a float64 is not passed.
func displayFloat64(reply interface{}) error {
	// Guard clause: reject anything that is not a float64 up front.
	val, ok := reply.(float64)
	if !ok {
		return fmt.Errorf("reply type is not a float64: %v", spew.Sdump(reply))
	}
	fmt.Printf("%f\n", val)
	return nil
}
// displaySpewDump is a displayHandler that simply uses spew.Dump to display the
// passed interface.
func displaySpewDump(reply interface{}) error {
	// spew.Dump writes an exhaustive, type-annotated dump to stdout and
	// cannot fail, so success is always reported.
	spew.Dump(reply)
	return nil
}
// displayJSONDump is a displayHandler that marshals the passed interface to
// JSON, re-indents it with tabs, and prints the result to stdout.
func displayJSONDump(reply interface{}) error {
	marshaled, err := json.Marshal(reply)
	if err != nil {
		return err
	}
	// Re-indent the compact JSON for readability before printing.
	var indented bytes.Buffer
	if err := json.Indent(&indented, marshaled, "", "\t"); err != nil {
		return err
	}
	fmt.Println(indented.String())
	return nil
}
// makeAddMultiSigAddress generates the cmd structure for addmultisigaddress
// commands.
func makeAddMultiSigAddress(args []interface{}) (btcjson.Cmd, error) {
	// args[1] is a JSON array of public keys supplied as a single string.
	var pubkeys []string
	err := json.Unmarshal([]byte(args[1].(string)), &pubkeys)
	if err != nil {
		return nil, err
	}
	// The optional third argument is the account name; empty when omitted.
	var opt string
	if len(args) > 2 {
		opt = args[2].(string)
	}
	return btcjson.NewAddMultisigAddressCmd("btcctl", args[0].(int), pubkeys, opt)
}
// makeAddNode generates the cmd structure for addnode commands.
// args[0] is the peer address, args[1] the add/remove/onetry directive.
func makeAddNode(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewAddNodeCmd("btcctl", args[0].(string),
		args[1].(string))
}
// makeCreateEncryptedWallet generates the cmd structure for
// createencryptedwallet commands.  args[0] is the wallet passphrase.
func makeCreateEncryptedWallet(args []interface{}) (btcjson.Cmd, error) {
	return btcws.NewCreateEncryptedWalletCmd("btcctl", args[0].(string)), nil
}
// makeCreateRawTransaction generates the cmd structure for createrawtransaction
// commands.
func makeCreateRawTransaction(args []interface{}) (btcjson.Cmd, error) {
	// args[0] is a JSON array of {"txid":..., "vout":...} inputs.
	var inputs []btcjson.TransactionInput
	err := json.Unmarshal([]byte(args[0].(string)), &inputs)
	if err != nil {
		return nil, err
	}
	// args[1] is a JSON object mapping addresses to (satoshi) amounts.
	var amounts map[string]int64
	err = json.Unmarshal([]byte(args[1].(string)), &amounts)
	if err != nil {
		return nil, err
	}
	return btcjson.NewCreateRawTransactionCmd("btcctl", inputs, amounts)
}
// makeDebugLevel generates the cmd structure for debuglevel commands.
func makeDebugLevel(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewDebugLevelCmd("btcctl", args[0].(string))
}
// makeDecodeRawTransaction generates the cmd structure for
// decoderawtransaction commands.
func makeDecodeRawTransaction(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewDecodeRawTransactionCmd("btcctl", args[0].(string))
}
// makeDecodeScript generates the cmd structure for decodescript commands.
func makeDecodeScript(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewDecodeScriptCmd("btcctl", args[0].(string))
}
// makeDumpPrivKey generates the cmd structure for
// dumpprivkey commands.
func makeDumpPrivKey(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewDumpPrivKeyCmd("btcctl", args[0].(string))
}
// makeEstimateFee generates the cmd structure for estimatefee commands.
// args[0] has already been converted to int64 by the toInt64 handler.
func makeEstimateFee(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewEstimateFeeCmd("btcctl", args[0].(int64))
}
// makeEstimatePriority generates the cmd structure for estimatepriority commands.
// args[0] has already been converted to int64 by the toInt64 handler.
func makeEstimatePriority(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewEstimatePriorityCmd("btcctl", args[0].(int64))
}
// makeGetAccount generates the cmd structure for
// getaccount commands.
func makeGetAccount(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetAccountCmd("btcctl", args[0].(string))
}
// makeGetAccountAddress generates the cmd structure for
// getaccountaddress commands.
func makeGetAccountAddress(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetAccountAddressCmd("btcctl", args[0].(string))
}
// makeGetAddedNodeInfo generates the cmd structure for
// getaddednodeinfo commands.
func makeGetAddedNodeInfo(args []interface{}) (btcjson.Cmd, error) {
	// Create the getaddednodeinfo command with defaults for the optional
	// parameters.
	cmd, err := btcjson.NewGetAddedNodeInfoCmd("btcctl", args[0].(bool))
	if err != nil {
		return nil, err
	}
	// Override the optional parameter if it was specified.
	if len(args) > 1 {
		cmd.Node = args[1].(string)
	}
	return cmd, nil
}
// makeGetAddressesByAccount generates the cmd structure for
// getaddressesbyaccount commands.
func makeGetAddressesByAccount(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetAddressesByAccountCmd("btcctl", args[0].(string))
}
// makeGetBalance generates the cmd structure for
// getbalance commands.
func makeGetBalance(args []interface{}) (btcjson.Cmd, error) {
	// Both parameters are optional: [account] [minconf].
	optargs := make([]interface{}, 0, 2)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(string))
	}
	if len(args) > 1 {
		optargs = append(optargs, args[1].(int))
	}
	return btcjson.NewGetBalanceCmd("btcctl", optargs...)
}
// makeGetBestBlockHash generates the cmd structure for
// getbestblockhash commands.
func makeGetBestBlockHash(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetBestBlockHashCmd("btcctl")
}
// makeGetBlock generates the cmd structure for getblock commands.
func makeGetBlock(args []interface{}) (btcjson.Cmd, error) {
	// Create the getblock command with defaults for the optional
	// parameters.
	getBlockCmd, err := btcjson.NewGetBlockCmd("btcctl", args[0].(string))
	if err != nil {
		return nil, err
	}
	// Override the optional parameters if they were specified.
	if len(args) > 1 {
		getBlockCmd.Verbose = args[1].(bool)
	}
	if len(args) > 2 {
		getBlockCmd.VerboseTx = args[2].(bool)
	}
	return getBlockCmd, nil
}
// makeGetBlockChainInfo generates the cmd structure for getblockchaininfo commands.
func makeGetBlockChainInfo(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetBlockChainInfoCmd("btcctl")
}
// makeGetBlockCount generates the cmd structure for getblockcount commands.
func makeGetBlockCount(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetBlockCountCmd("btcctl")
}
// makeGetBlockHash generates the cmd structure for getblockhash commands.
// args[0] has already been converted to int64 by the toInt64 handler.
func makeGetBlockHash(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetBlockHashCmd("btcctl", args[0].(int64))
}
// makeGetBlockTemplate generates the cmd structure for getblocktemplate commands.
func makeGetBlockTemplate(args []interface{}) (btcjson.Cmd, error) {
	cmd, err := btcjson.NewGetBlockTemplateCmd("btcctl")
	if err != nil {
		return nil, err
	}
	// The optional argument is a raw JSON request object which overrides
	// the whole command when supplied.
	if len(args) == 1 {
		err = cmd.UnmarshalJSON([]byte(args[0].(string)))
		if err != nil {
			return nil, err
		}
	}
	return cmd, nil
}
// makeGetConnectionCount generates the cmd structure for
// getconnectioncount commands.
func makeGetConnectionCount(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetConnectionCountCmd("btcctl")
}
// makeGetDifficulty generates the cmd structure for
// getdifficulty commands.
func makeGetDifficulty(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetDifficultyCmd("btcctl")
}
// makeGetGenerate generates the cmd structure for
// getgenerate commands.
func makeGetGenerate(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetGenerateCmd("btcctl")
}
// makeGetHashesPerSec generates the cmd structure for gethashespersec commands.
func makeGetHashesPerSec(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetHashesPerSecCmd("btcctl")
}
// makeGetInfo generates the cmd structure for getinfo commands.
func makeGetInfo(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetInfoCmd("btcctl")
}
// makeGetMiningInfo generates the cmd structure for getmininginfo commands.
func makeGetMiningInfo(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetMiningInfoCmd("btcctl")
}
// makeGetNetworkHashPS generates the cmd structure for getnetworkhashps
// commands.
func makeGetNetworkHashPS(args []interface{}) (btcjson.Cmd, error) {
	// Create the getnetworkhashps command with defaults for the optional
	// parameters.
	cmd, err := btcjson.NewGetNetworkHashPSCmd("btcctl")
	if err != nil {
		return nil, err
	}
	// Override the optional blocks if specified.
	if len(args) > 0 {
		cmd.Blocks = args[0].(int)
	}
	// Override the optional height if specified.
	if len(args) > 1 {
		cmd.Height = args[1].(int)
	}
	return cmd, nil
}
// makeGetNetTotals generates the cmd structure for getnettotals commands.
func makeGetNetTotals(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetNetTotalsCmd("btcctl")
}
// makeGetNewAddress generates the cmd structure for getnewaddress commands.
func makeGetNewAddress(args []interface{}) (btcjson.Cmd, error) {
	// The account argument is optional; empty means the default account.
	var account string
	if len(args) > 0 {
		account = args[0].(string)
	}
	return btcjson.NewGetNewAddressCmd("btcctl", account)
}
// makeGetPeerInfo generates the cmd structure for
// getpeerinfo commands.
func makeGetPeerInfo(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetPeerInfoCmd("btcctl")
}
// makeGetRawChangeAddress generates the cmd structure for getrawchangeaddress commands.
func makeGetRawChangeAddress(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetRawChangeAddressCmd("btcctl")
}
// makeGetRawMempool generates the cmd structure for
// getrawmempool commands.
func makeGetRawMempool(args []interface{}) (btcjson.Cmd, error) {
	// The optional argument is the verbose flag.
	opt := make([]bool, 0, 1)
	if len(args) > 0 {
		opt = append(opt, args[0].(bool))
	}
	return btcjson.NewGetRawMempoolCmd("btcctl", opt...)
}
// makeGetReceivedByAccount generates the cmd structure for
// getreceivedbyaccount commands.
func makeGetReceivedByAccount(args []interface{}) (btcjson.Cmd, error) {
	// The optional second argument is minconf.
	opt := make([]int, 0, 1)
	if len(args) > 1 {
		opt = append(opt, args[1].(int))
	}
	return btcjson.NewGetReceivedByAccountCmd("btcctl", args[0].(string), opt...)
}
// makeGetReceivedByAddress generates the cmd structure for
// getreceivedbyaddress commands.
func makeGetReceivedByAddress(args []interface{}) (btcjson.Cmd, error) {
	// The optional second argument is minconf.
	opt := make([]int, 0, 1)
	if len(args) > 1 {
		opt = append(opt, args[1].(int))
	}
	return btcjson.NewGetReceivedByAddressCmd("btcctl", args[0].(string), opt...)
}
// makeGetTransaction generates the cmd structure for gettransaction commands.
func makeGetTransaction(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetTransactionCmd("btcctl", args[0].(string))
}
// makeGetTxOutSetInfo generates the cmd structure for gettxoutsetinfo commands.
func makeGetTxOutSetInfo(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewGetTxOutSetInfoCmd("btcctl")
}
// makeGetWork generates the cmd structure for getwork commands.
func makeGetWork(args []interface{}) (btcjson.Cmd, error) {
	cmd, err := btcjson.NewGetWorkCmd("btcctl")
	if err != nil {
		return nil, err
	}
	// The optional argument is the block header data to submit.
	if len(args) == 1 {
		cmd.Data = args[0].(string)
	}
	return cmd, nil
}
// makeHelp generates the cmd structure for help commands.
func makeHelp(args []interface{}) (btcjson.Cmd, error) {
	// The optional argument is the command name to get help for.
	opt := make([]string, 0, 1)
	if len(args) > 0 {
		opt = append(opt, args[0].(string))
	}
	return btcjson.NewHelpCmd("btcctl", opt...)
}
// makeGetRawTransaction generates the cmd structure for
// getrawtransaction commands.
func makeGetRawTransaction(args []interface{}) (btcjson.Cmd, error) {
	// The optional second argument is the verbose flag (as an int).
	opt := make([]int, 0, 1)
	if len(args) > 1 {
		opt = append(opt, args[1].(int))
	}
	return btcjson.NewGetRawTransactionCmd("btcctl", args[0].(string), opt...)
}
// makeImportPrivKey generates the cmd structure for
// importprivkey commands.
func makeImportPrivKey(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [label] [rescan].
	var optargs = make([]interface{}, 0, 2)
	if len(args) > 1 {
		optargs = append(optargs, args[1].(string))
	}
	if len(args) > 2 {
		optargs = append(optargs, args[2].(bool))
	}
	return btcjson.NewImportPrivKeyCmd("btcctl", args[0].(string), optargs...)
}
// makeImportWallet generates the cmd structure for
// importwallet commands.
func makeImportWallet(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewImportWalletCmd("btcctl", args[0].(string))
}
// makeKeyPoolRefill generates the cmd structure for keypoolrefill commands.
func makeKeyPoolRefill(args []interface{}) (btcjson.Cmd, error) {
	// The optional argument is the new key pool size (converted by toInt,
	// hence the int -> uint conversion here).
	var optargs = make([]uint, 0, 1)
	if len(args) > 0 {
		optargs = append(optargs, uint(args[0].(int)))
	}
	return btcjson.NewKeyPoolRefillCmd("btcctl", optargs...)
}
// makeListAccounts generates the cmd structure for listaccounts commands.
func makeListAccounts(args []interface{}) (btcjson.Cmd, error) {
	// The optional argument is minconf.
	var optargs = make([]int, 0, 1)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(int))
	}
	return btcjson.NewListAccountsCmd("btcctl", optargs...)
}
// makeListAddressGroupings generates the cmd structure for listaddressgroupings commands.
func makeListAddressGroupings(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewListAddressGroupingsCmd("btcctl")
}
// makeListReceivedByAccount generates the cmd structure for listreceivedbyaccount commands.
func makeListReceivedByAccount(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [minconf] [includeempty].
	var optargs = make([]interface{}, 0, 2)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(int))
	}
	if len(args) > 1 {
		optargs = append(optargs, args[1].(bool))
	}
	return btcjson.NewListReceivedByAccountCmd("btcctl", optargs...)
}
// makeListReceivedByAddress generates the cmd structure for listreceivedbyaddress commands.
func makeListReceivedByAddress(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [minconf] [includeempty].
	var optargs = make([]interface{}, 0, 2)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(int))
	}
	if len(args) > 1 {
		optargs = append(optargs, args[1].(bool))
	}
	return btcjson.NewListReceivedByAddressCmd("btcctl", optargs...)
}
// makeListLockUnspent generates the cmd structure for listlockunspent commands.
func makeListLockUnspent(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewListLockUnspentCmd("btcctl")
}
// makeListSinceBlock generates the cmd structure for
// listsinceblock commands.
func makeListSinceBlock(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [blockhash] [minconf].
	var optargs = make([]interface{}, 0, 2)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(string))
	}
	if len(args) > 1 {
		optargs = append(optargs, args[1].(int))
	}
	return btcjson.NewListSinceBlockCmd("btcctl", optargs...)
}
// makeListTransactions generates the cmd structure for
// listtransactions commands.
func makeListTransactions(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [account] [count] [from].
	var optargs = make([]interface{}, 0, 3)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(string))
	}
	if len(args) > 1 {
		optargs = append(optargs, args[1].(int))
	}
	if len(args) > 2 {
		optargs = append(optargs, args[2].(int))
	}
	return btcjson.NewListTransactionsCmd("btcctl", optargs...)
}
// makeListUnspent generates the cmd structure for listunspent commands.
func makeListUnspent(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [minconf] [maxconf] [jsonaddressarray].
	var optargs = make([]interface{}, 0, 3)
	if len(args) > 0 {
		optargs = append(optargs, args[0].(int))
	}
	if len(args) > 1 {
		optargs = append(optargs, args[1].(int))
	}
	if len(args) > 2 {
		// The third argument is a JSON array of addresses in one string.
		var addrs []string
		err := json.Unmarshal([]byte(args[2].(string)), &addrs)
		if err != nil {
			return nil, err
		}
		optargs = append(optargs, addrs)
	}
	return btcjson.NewListUnspentCmd("btcctl", optargs...)
}
// makePing generates the cmd structure for ping commands.
func makePing(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewPingCmd("btcctl")
}
// makeSendFrom generates the cmd structure for sendfrom commands.
func makeSendFrom(args []interface{}) (btcjson.Cmd, error) {
	// Optional arguments: [minconf] [comment] [comment-to].
	var optargs = make([]interface{}, 0, 3)
	if len(args) > 3 {
		optargs = append(optargs, args[3].(int))
	}
	if len(args) > 4 {
		optargs = append(optargs, args[4].(string))
	}
	if len(args) > 5 {
		optargs = append(optargs, args[5].(string))
	}
	// args[2] is the amount already converted to satoshi by toSatoshi.
	return btcjson.NewSendFromCmd("btcctl", args[0].(string),
		args[1].(string), args[2].(int64), optargs...)
}
// makeSendMany generates the cmd structure for sendmany commands.
// args[1] is a JSON object mapping addresses to BTC amounts.
func makeSendMany(args []interface{}) (btcjson.Cmd, error) {
	origPairs := make(map[string]float64)
	err := json.Unmarshal([]byte(args[1].(string)), &origPairs)
	if err != nil {
		return nil, err
	}
	// Convert each BTC amount to satoshi, rounding to the nearest satoshi
	// instead of truncating so amounts such as 0.29 BTC are not short by
	// one satoshi due to float64 representation error (matches toSatoshi).
	pairs := make(map[string]int64, len(origPairs))
	for addr, value := range origPairs {
		amt := float64(btcutil.SatoshiPerBitcoin) * value
		if amt < 0 {
			pairs[addr] = int64(amt - 0.5)
		} else {
			pairs[addr] = int64(amt + 0.5)
		}
	}
	// Optional arguments: [minconf] [comment].
	var optargs = make([]interface{}, 0, 2)
	if len(args) > 2 {
		optargs = append(optargs, args[2].(int))
	}
	if len(args) > 3 {
		optargs = append(optargs, args[3].(string))
	}
	return btcjson.NewSendManyCmd("btcctl", args[0].(string), pairs, optargs...)
}
// makeSendRawTransaction generates the cmd structure for sendrawtransaction
// commands.
func makeSendRawTransaction(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewSendRawTransactionCmd("btcctl", args[0].(string))
}
// makeSendToAddress generates the cmd structure for sendtoaddress commands.
// args[1] is the amount already converted to satoshi; args[2:] are the
// optional comment strings passed through unchanged.
func makeSendToAddress(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewSendToAddressCmd("btcctl", args[0].(string), args[1].(int64), args[2:]...)
}
// makeSetGenerate generates the cmd structure for setgenerate commands.
func makeSetGenerate(args []interface{}) (btcjson.Cmd, error) {
	// The optional argument is genproclimit.
	var optargs = make([]int, 0, 1)
	if len(args) > 1 {
		optargs = append(optargs, args[1].(int))
	}
	return btcjson.NewSetGenerateCmd("btcctl", args[0].(bool), optargs...)
}
// makeSetTxFee generates the cmd structure for settxfee commands.
// args[0] is the fee already converted to satoshi by toSatoshi.
func makeSetTxFee(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewSetTxFeeCmd("btcctl", args[0].(int64))
}
// makeSignMessage generates the cmd structure for signmessage commands.
// args[0] is the signing address, args[1] the message to sign.
func makeSignMessage(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewSignMessageCmd("btcctl", args[0].(string),
		args[1].(string))
}
// makeStop generates the cmd structure for stop commands.
func makeStop(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewStopCmd("btcctl")
}
// makeSubmitBlock generates the cmd structure for submitblock commands.
func makeSubmitBlock(args []interface{}) (btcjson.Cmd, error) {
	// The optional second argument carries the work ID.
	opts := &btcjson.SubmitBlockOptions{}
	if len(args) == 2 {
		opts.WorkId = args[1].(string)
	}
	return btcjson.NewSubmitBlockCmd("btcctl", args[0].(string), opts)
}
// makeValidateAddress generates the cmd structure for validateaddress commands.
func makeValidateAddress(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewValidateAddressCmd("btcctl", args[0].(string))
}
// makeVerifyChain generates the cmd structure for verifychain commands.
func makeVerifyChain(args []interface{}) (btcjson.Cmd, error) {
	// Both optional args were converted by toInt; narrow them to int32
	// as the command constructor requires.
	iargs := make([]int32, 0, 2)
	for _, i := range args {
		iargs = append(iargs, int32(i.(int)))
	}
	return btcjson.NewVerifyChainCmd("btcctl", iargs...)
}
// makeVerifyMessage generates the cmd structure for verifymessage commands.
func makeVerifyMessage(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewVerifyMessageCmd("btcctl", args[0].(string),
		args[1].(string), args[2].(string))
}
// makeWalletLock generates the cmd structure for walletlock commands.
func makeWalletLock(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewWalletLockCmd("btcctl")
}
// makeWalletPassphrase generates the cmd structure for walletpassphrase commands.
func makeWalletPassphrase(args []interface{}) (btcjson.Cmd, error) {
	// Default unlock timeout of 60 seconds when none was supplied.
	timeout := int64(60)
	if len(args) > 1 {
		timeout = args[1].(int64)
	}
	return btcjson.NewWalletPassphraseCmd("btcctl", args[0].(string), timeout)
}
// makeWalletPassphraseChange generates the cmd structure for
// walletpassphrasechange commands.
func makeWalletPassphraseChange(args []interface{}) (btcjson.Cmd, error) {
	return btcjson.NewWalletPassphraseChangeCmd("btcctl", args[0].(string),
		args[1].(string))
}
// send sends a JSON-RPC command to the specified RPC server and examines the
// results for various error conditions.  It either returns a valid result or
// an appropriate error.
func send(cfg *config, msg []byte) (interface{}, error) {
	var reply btcjson.Reply
	var err error
	// Use the non-TLS path when TLS is disabled outright, or when no
	// certificate was supplied while verification is still required
	// (there would be nothing to verify against).
	if cfg.NoTls || (cfg.RPCCert == "" && !cfg.TlsSkipVerify) {
		reply, err = btcjson.RpcCommand(cfg.RPCUser, cfg.RPCPassword,
			cfg.RPCServer, msg)
	} else {
		// Load the optional certificate used to validate the server.
		var pem []byte
		if cfg.RPCCert != "" {
			pem, err = ioutil.ReadFile(cfg.RPCCert)
			if err != nil {
				return nil, err
			}
		}
		reply, err = btcjson.TlsRpcCommand(cfg.RPCUser,
			cfg.RPCPassword, cfg.RPCServer, msg, pem,
			cfg.TlsSkipVerify)
	}
	if err != nil {
		return nil, err
	}
	// The reply can carry an application-level error even when the
	// transport itself succeeded.
	if reply.Error != nil {
		return nil, reply.Error
	}
	return reply.Result, nil
}
// sendCommand creates a JSON-RPC command using the passed command and arguments
// and then sends it.  A prefix is added to any errors that occur indicating
// what step failed.
func sendCommand(cfg *config, command btcjson.Cmd) (interface{}, error) {
	msg, err := json.Marshal(command)
	if err != nil {
		// Pass the error value itself to %v; calling err.Error() first
		// is redundant and loses nothing by being removed.
		return nil, fmt.Errorf("CreateMessage: %v", err)
	}
	reply, err := send(cfg, msg)
	if err != nil {
		return nil, fmt.Errorf("RpcCommand: %v", err)
	}
	return reply, nil
}
// commandHandler handles commands provided via the cli using the specific
// handler data to instruct the handler what to do.
func commandHandler(cfg *config, command string, data *handlerData, args []string) error {
	// Ensure the number of arguments are the expected value.
	if len(args) < data.requiredArgs {
		return ErrUsage
	}
	if len(args) > data.requiredArgs+data.optionalArgs {
		return ErrUsage
	}
	// Ensure there is a display handler.
	if data.displayHandler == nil {
		return ErrNoDisplayHandler
	}
	// Ensure the number of conversion handlers is valid if any are
	// specified.  Error strings are lowercase per Go convention; this one
	// indicates a programming error in the commandHandlers table.
	convHandlers := data.conversionHandlers
	if convHandlers != nil && len(convHandlers) < len(args) {
		return errors.New("the number of conversion handlers is invalid")
	}
	// Convert input parameters per the conversion handlers in a single
	// pass; arguments without a converter remain raw strings.
	iargs := make([]interface{}, len(args))
	for i, arg := range args {
		iargs[i] = arg
		if convHandlers == nil || convHandlers[i] == nil {
			continue
		}
		converted, err := convHandlers[i](arg)
		if err != nil {
			return err
		}
		iargs[i] = converted
	}
	cmd, err := data.makeCmd(iargs)
	if err != nil {
		return err
	}
	// Create and send the appropriate JSON-RPC command.
	reply, err := sendCommand(cfg, cmd)
	if err != nil {
		return err
	}
	// Display the results of the JSON-RPC command using the provided
	// display handler.
	if reply != nil {
		return data.displayHandler(reply)
	}
	return nil
}
// usage displays the flag help followed by a sorted synopsis of every
// supported command.
func usage(parser *flags.Parser) {
	parser.WriteHelp(os.Stderr)
	// Extract usage information for each command from the command handler
	// data and sort by command name.
	fmt.Fprintf(os.Stderr, "\nCommands:\n")
	usageStrings := make([]string, 0, len(commandHandlers))
	for command, data := range commandHandlers {
		// Use a name distinct from the enclosing function to avoid
		// shadowing usage() inside its own body.
		entry := command
		if len(data.usage) > 0 {
			entry += " " + data.usage
		}
		usageStrings = append(usageStrings, entry)
	}
	// sort.Strings is the idiomatic shorthand for
	// sort.Sort(sort.StringSlice(...)).
	sort.Strings(usageStrings)
	for _, entry := range usageStrings {
		fmt.Fprintf(os.Stderr, "\t%s\n", entry)
	}
}
func main() {
	// loadConfig is defined elsewhere in this package; it returns the
	// flag parser, the parsed config, and remaining positional args.
	parser, cfg, args, err := loadConfig()
	if err != nil {
		usage(parser)
		os.Exit(1)
	}
	// No command given: show usage but exit successfully.
	if len(args) < 1 {
		usage(parser)
		return
	}
	// Display usage if the command is not supported.
	data, exists := commandHandlers[args[0]]
	if !exists {
		fmt.Fprintf(os.Stderr, "Unrecognized command: %s\n", args[0])
		usage(parser)
		os.Exit(1)
	}
	// Execute the command.
	err = commandHandler(cfg, args[0], data, args[1:])
	if err != nil {
		// ErrUsage is a sentinel meaning "bad arguments"; show the
		// full usage text instead of printing the sentinel itself.
		if err == ErrUsage {
			usage(parser)
			os.Exit(1)
		}
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
|
package hookit
import (
"fmt"
"strings"
"github.com/nanobox-io/nanobox/commands/registry"
"github.com/nanobox-io/nanobox/util"
"github.com/nanobox-io/nanobox/util/console"
"github.com/nanobox-io/nanobox/util/display"
)
// Exec executes a hook inside of a container and returns the hook's
// output. A "bad exit code(126)" error whose output mentions "such file
// or directory" means the hook script does not exist in the container;
// that case is treated as a no-op rather than an error.
func Exec(container, hook, payload, displayLevel string) (string, error) {
	out, err := util.DockerExec(container, "root", "/opt/nanobox/hooks/"+hook, []string{payload}, display.NewStreamer(displayLevel))
	// out is already a string (it is returned as one below), so the
	// previous string(out) conversion was redundant.
	if err != nil && (strings.Contains(out, "such file or directory") && strings.Contains(err.Error(), "bad exit code(126)")) {
		// if its a 126 the hook didnt exist
		return "", nil
	}
	if err != nil {
		return out, util.ErrorfQuiet("failed to execute hook (%s) on %s: %s", hook, container, err)
	}
	return out, nil
}
// DebugExec runs a hook via Exec and, on failure, optionally drops the
// user into an interactive console in the container (when the "debug"
// registry flag is set) before retrying the hook exactly once.
func DebugExec(container, hook, payload, displayLevel string) (string, error) {
	res, err := Exec(container, hook, payload, displayLevel)
	// leave early if no error
	if err == nil {
		return res, err
	}
	display.ErrorTask()
	err = fmt.Errorf("failed to execute %s hook: %s", hook, err.Error())
	if registry.GetBool("debug") {
		fmt.Printf("An error has occurred: \"%s\"\n", err)
		fmt.Println("Entering Debug Mode")
		fmt.Printf("  container: %s\n", container)
		fmt.Printf("  hook: %s\n", hook)
		fmt.Printf("  payload: %s\n", payload)
		// err here shadows the outer err on purpose: a console failure
		// aborts the retry below.
		err := console.Run(container, console.ConsoleConfig{})
		if err != nil {
			return res, fmt.Errorf("failed to establish a debug session: %s", err.Error())
		}
	}
	// try running the exec one more time.
	return Exec(container, hook, payload, displayLevel)
}
add debug output
package hookit
import (
"fmt"
"strings"
"github.com/nanobox-io/nanobox/commands/registry"
"github.com/nanobox-io/nanobox/util"
"github.com/nanobox-io/nanobox/util/console"
"github.com/nanobox-io/nanobox/util/display"
)
// Exec executes a hook inside of a container and returns the hook's
// output. A "bad exit code(126)" error whose output mentions "such file
// or directory" indicates the hook script is absent and is treated as a
// successful no-op.
func Exec(container, hook, payload, displayLevel string) (string, error) {
	out, err := util.DockerExec(container, "root", "/opt/nanobox/hooks/"+hook, []string{payload}, display.NewStreamer(displayLevel))
	if err != nil && (strings.Contains(string(out), "such file or directory") && strings.Contains(err.Error(), "bad exit code(126)")) {
		// if its a 126 the hook didnt exist
		return "", nil
	}
	if err != nil {
		return out, util.ErrorfQuiet("failed to execute hook (%s) on %s: %s", hook, container, err)
	}
	return out, nil
}
// DebugExec runs a hook via Exec and, on failure, optionally drops the
// user into an interactive console in the container (when the "debug"
// registry flag is set) before retrying the hook exactly once.
func DebugExec(container, hook, payload, displayLevel string) (string, error) {
	res, err := Exec(container, hook, payload, displayLevel)
	// leave early if no error
	if err == nil {
		return res, err
	}
	display.ErrorTask()
	err = fmt.Errorf("failed to execute %s hook: %s", hook, err.Error())
	if registry.GetBool("debug") {
		// NOTE(review): this prints res (the hook output), not err —
		// presumably intentional to surface the failing hook's output
		// in debug mode; confirm against the commit intent.
		fmt.Printf("An error has occurred: \"%s\"\n", res)
		fmt.Println("Entering Debug Mode")
		fmt.Printf("  container: %s\n", container)
		fmt.Printf("  hook: %s\n", hook)
		fmt.Printf("  payload: %s\n", payload)
		err := console.Run(container, console.ConsoleConfig{})
		if err != nil {
			return res, fmt.Errorf("failed to establish a debug session: %s", err.Error())
		}
	}
	// try running the exec one more time.
	return Exec(container, hook, payload, displayLevel)
}
|
package spanish
import (
"github.com/kljensen/snowball/snowballword"
"testing"
)
// Test_stopWords checks isStopWord against words whose classification
// is known in advance.
func Test_stopWords(t *testing.T) {
	cases := map[string]bool{
		"el":    true,
		"queso": false,
	}
	for word, want := range cases {
		got := isStopWord(word)
		if got != want {
			t.Errorf("Expect isStopWord(\"%v\") = %v, but got %v",
				word, want, got,
			)
		}
	}
}
// Test isLowerVowel for things we know should be true
// or false.
//
func Test_isLowerVowel(t *testing.T) {
	testCases := []struct {
		chars  string
		result bool
	}{
		// These are all vowels.
		{"aeiouáéíóúü", true},
		// None of these are vowels.
		{"cbfqhkl", false},
	}
	for _, testCase := range testCases {
		// Every rune in the string shares the case's expected result.
		for _, r := range testCase.chars {
			result := isLowerVowel(r)
			if result != testCase.result {
				t.Errorf("Expect isLowerVowel(\"%v\") = %v, but got %v",
					r, testCase.result, result,
				)
			}
		}
	}
}
// Test isLowerVowel for things we know should be true
// or false.
//
func Test_findRegions(t *testing.T) {
testCases := []struct {
word string
r1start int
r2start int
rvstart int
}{
{"macho", 3, 5, 4},
{"olivia", 2, 4, 3},
{"trabajo", 4, 6, 3},
{"áureo", 3, 5, 3},
{"piñaolayas", 3, 6, 3},
{"terminales", 3, 6, 3},
{"durmió", 3, 6, 3},
{"cobija", 3, 5, 3},
{"anderson", 2, 5, 4},
{"cervezas", 3, 6, 3},
{"climáticas", 4, 6, 3},
{"expide", 2, 5, 4},
{"cenizas", 3, 5, 3},
{"maximiliano", 3, 5, 3},
{"específicos", 2, 5, 4},
{"menor", 3, 5, 3},
{"generis", 3, 5, 3},
{"casero", 3, 5, 3},
{"pululan", 3, 5, 3},
{"suscitado", 3, 6, 3},
{"pesadez", 3, 5, 3},
{"interno", 2, 5, 4},
{"agredido", 2, 5, 4},
{"desprendía", 3, 7, 3},
{"vistazo", 3, 6, 3},
{"frecuentan", 4, 7, 3},
{"noviembre", 3, 6, 3},
{"sintética", 3, 6, 3},
{"newagismo", 3, 5, 3},
{"eliseo", 2, 4, 3},
{"desbordado", 3, 6, 3},
{"dispongo", 3, 6, 3},
{"dilatar", 3, 5, 3},
{"xochitl", 3, 6, 3},
{"proporcionaba", 4, 6, 3},
{"pue", 3, 3, 3},
{"alpargatado", 2, 5, 4},
{"exigida", 2, 4, 3},
{"céntricas", 3, 7, 3},
{"prende", 4, 6, 3},
{"estructural", 2, 6, 5},
{"ilegalmente", 2, 4, 3},
{"freeport", 5, 7, 3},
{"sonrisas", 3, 6, 3},
{"cobró", 3, 5, 3},
{"dioses", 4, 6, 3},
{"consistieron", 3, 6, 3},
{"policiales", 3, 5, 3},
{"conciliador", 3, 6, 3},
{"fierro", 4, 6, 3},
{"aparadores", 2, 4, 3},
{"coreados", 3, 6, 3},
{"posición", 3, 5, 3},
{"adversidades", 2, 5, 4},
{"comprometido", 3, 7, 3},
{"aventuras", 2, 4, 3},
{"santiso", 3, 6, 3},
{"talentos", 3, 5, 3},
{"apreciar", 2, 5, 4},
{"sprints", 5, 7, 4},
{"zarco", 3, 5, 3},
{"concretos", 3, 7, 3},
{"gavica", 3, 5, 3},
{"suavemente", 4, 6, 3},
{"españolitos", 2, 5, 4},
{"grabará", 4, 6, 3},
{"entregados", 2, 6, 5},
{"gustaría", 3, 6, 3},
{"nickin", 3, 6, 3},
{"sogem", 3, 5, 3},
{"prohíbe", 4, 6, 3},
{"espinoso", 2, 5, 4},
{"atraviesan", 2, 5, 4},
{"bancomext", 3, 6, 3},
{"paraguay", 3, 5, 3},
{"amamos", 2, 4, 3},
{"consigna", 3, 6, 3},
{"funcionarios", 3, 7, 3},
{"marquis", 3, 7, 3},
{"desactivaron", 3, 5, 3},
{"concentrados", 3, 6, 3},
{"democratizante", 3, 5, 3},
{"afianzadora", 2, 5, 3},
{"homicidio", 3, 5, 3},
{"promovidos", 4, 6, 3},
{"maquiladora", 3, 6, 3},
{"bike", 3, 4, 3},
{"recuerdos", 3, 6, 3},
{"géneros", 3, 5, 3},
{"rechaza", 3, 6, 3},
{"sentarían", 3, 6, 3},
{"quererlo", 4, 6, 3},
{"sofisticado", 3, 5, 3},
{"miriam", 3, 6, 3},
{"echara", 2, 5, 4},
{"mico", 3, 4, 3},
{"enferma", 2, 5, 4},
{"reforzamiento", 3, 5, 3},
{"circunscrito", 3, 6, 3},
{"indiana", 2, 6, 4},
{"metrópoli", 3, 6, 3},
{"libreta", 3, 6, 3},
{"gonzalez", 3, 6, 3},
{"antidemocrática", 2, 5, 4},
}
for _, testCase := range testCases {
w := snowballword.New(testCase.word)
r1start, r2start, rvstart := findRegions(w)
if r1start != testCase.r1start || r2start != testCase.r2start || rvstart != testCase.rvstart {
t.Errorf("Expect findRegions(\"%v\") = %v, %v, %v, but got %v, %v, %v",
testCase.word, testCase.r1start, testCase.r2start, testCase.rvstart,
r1start, r2start, rvstart,
)
}
}
}
type stepFunc func(*snowballword.SnowballWord) bool
type stepTest struct {
WordIn string
R1start int
R2start int
RVstart int
Changed bool
WordOut string
R1startOut int
R2startOut int
RVstartOut int
}
// runStepTest applies the stemming-step function f to each case's word
// with its region markers (R1, R2, RV) preset, then verifies the
// returned changed-flag, the resulting word, and the updated region
// markers against the expected values.
func runStepTest(t *testing.T, f stepFunc, tcs []stepTest) {
	for _, testCase := range tcs {
		w := snowballword.New(testCase.WordIn)
		w.R1start = testCase.R1start
		w.R2start = testCase.R2start
		w.RVstart = testCase.RVstart
		retval := f(w)
		if retval != testCase.Changed || w.String() != testCase.WordOut || w.R1start != testCase.R1startOut || w.R2start != testCase.R2startOut || w.RVstart != testCase.RVstartOut {
			t.Errorf("Expected %v -> \"{%v, %v, %v, %v}\", but got \"{%v, %v, %v, %v}\"", testCase.WordIn, testCase.WordOut, testCase.R1startOut, testCase.R2startOut, testCase.RVstartOut, w.String(), w.R1start, w.R2start, w.RVstart)
		}
	}
}
// Test step0, the removal of pronoun suffixes.
//
func Test_step0(t *testing.T) {
testCases := []stepTest{
{"liberarlo", 3, 5, 3, true, "liberar", 3, 5, 3},
{"ejecutarse", 2, 4, 3, true, "ejecutar", 2, 4, 3},
{"convirtiéndolas", 3, 6, 3, true, "convirtiendo", 3, 6, 3},
{"perfeccionarlo", 3, 6, 3, true, "perfeccionar", 3, 6, 3},
{"formarlo", 3, 6, 3, true, "formar", 3, 6, 3},
{"negociarlo", 3, 5, 3, true, "negociar", 3, 5, 3},
{"dirigirla", 3, 5, 3, true, "dirigir", 3, 5, 3},
{"malograrlas", 3, 5, 3, true, "malograr", 3, 5, 3},
{"atacarlos", 2, 4, 3, true, "atacar", 2, 4, 3},
{"originarla", 2, 4, 3, true, "originar", 2, 4, 3},
{"ponerlos", 3, 5, 3, true, "poner", 3, 5, 3},
{"ubicándolo", 2, 4, 3, true, "ubicando", 2, 4, 3},
{"dejarme", 3, 5, 3, true, "dejar", 3, 5, 3},
{"regalarnos", 3, 5, 3, true, "regalar", 3, 5, 3},
{"resolverlas", 3, 5, 3, true, "resolver", 3, 5, 3},
{"esperarse", 2, 5, 4, true, "esperar", 2, 5, 4},
{"cuidarlo", 4, 6, 3, true, "cuidar", 4, 6, 3},
{"empezarlos", 2, 5, 4, true, "empezar", 2, 5, 4},
{"gastarla", 3, 6, 3, true, "gastar", 3, 6, 3},
{"levantarme", 3, 5, 3, true, "levantar", 3, 5, 3},
{"ausentarse", 3, 5, 3, true, "ausentar", 3, 5, 3},
{"colocándose", 3, 5, 3, true, "colocando", 3, 5, 3},
{"suponerse", 3, 5, 3, true, "suponer", 3, 5, 3},
{"someterlos", 3, 5, 3, true, "someter", 3, 5, 3},
{"criticarlos", 4, 6, 3, true, "criticar", 4, 6, 3},
{"consolidarlo", 3, 6, 3, true, "consolidar", 3, 6, 3},
{"globalizarse", 4, 6, 3, true, "globalizar", 4, 6, 3},
{"corregirla", 3, 6, 3, true, "corregir", 3, 6, 3},
{"aplicarle", 2, 5, 4, true, "aplicar", 2, 5, 4},
{"casarse", 3, 5, 3, true, "casar", 3, 5, 3},
{"costándole", 3, 6, 3, true, "costando", 3, 6, 3},
{"rescindirlo", 3, 6, 3, true, "rescindir", 3, 6, 3},
{"quitándole", 4, 6, 3, true, "quitando", 4, 6, 3},
{"conservarse", 3, 6, 3, true, "conservar", 3, 6, 3},
{"venderlo", 3, 6, 3, true, "vender", 3, 6, 3},
{"garantizarse", 3, 5, 3, true, "garantizar", 3, 5, 3},
{"disfrutarse", 3, 7, 3, true, "disfrutar", 3, 7, 3},
{"comunicarse", 3, 5, 3, true, "comunicar", 3, 5, 3},
{"propiciarse", 4, 6, 3, true, "propiciar", 4, 6, 3},
{"otorgarnos", 2, 4, 3, true, "otorgar", 2, 4, 3},
{"contorsionarse", 3, 6, 3, true, "contorsionar", 3, 6, 3},
{"motivarlas", 3, 5, 3, true, "motivar", 3, 5, 3},
{"congelarse", 3, 6, 3, true, "congelar", 3, 6, 3},
{"generandoles", 3, 5, 3, true, "generando", 3, 5, 3},
{"evitarlo", 2, 4, 3, true, "evitar", 2, 4, 3},
{"atenderlos", 2, 4, 3, true, "atender", 2, 4, 3},
{"apoyándola", 2, 4, 3, true, "apoyando", 2, 4, 3},
{"pasarse", 3, 5, 3, true, "pasar", 3, 5, 3},
{"escucharlos", 2, 5, 4, true, "escuchar", 2, 5, 4},
{"intervenirse", 2, 5, 4, true, "intervenir", 2, 5, 4},
{"contratarle", 3, 7, 3, true, "contratar", 3, 7, 3},
{"retirándose", 3, 5, 3, true, "retirando", 3, 5, 3},
{"quitarles", 4, 6, 3, true, "quitar", 4, 6, 3},
{"reforzarlas", 3, 5, 3, true, "reforzar", 3, 5, 3},
{"obtenerla", 2, 5, 4, true, "obtener", 2, 5, 4},
{"considerarlo", 3, 6, 3, true, "considerar", 3, 6, 3},
{"regresarse", 3, 6, 3, true, "regresar", 3, 6, 3},
{"ponerse", 3, 5, 3, true, "poner", 3, 5, 3},
{"llevándose", 4, 6, 3, true, "llevando", 4, 6, 3},
{"ocuparse", 2, 4, 3, true, "ocupar", 2, 4, 3},
{"aprovecharse", 2, 5, 4, true, "aprovechar", 2, 5, 4},
{"corregirlo", 3, 6, 3, true, "corregir", 3, 6, 3},
{"probarle", 4, 6, 3, true, "probar", 4, 6, 3},
{"comernos", 3, 5, 3, true, "comer", 3, 5, 3},
{"iniciarme", 2, 4, 3, true, "iniciar", 2, 4, 3},
{"concentrarse", 3, 6, 3, true, "concentrar", 3, 6, 3},
{"llevarse", 4, 6, 3, true, "llevar", 4, 6, 3},
{"difundirlo", 3, 5, 3, true, "difundir", 3, 5, 3},
{"basándose", 3, 5, 3, true, "basando", 3, 5, 3},
{"destinarlos", 3, 6, 3, true, "destinar", 3, 6, 3},
{"reubicarse", 4, 6, 3, true, "reubicar", 4, 6, 3},
{"manteniéndose", 3, 6, 3, true, "manteniendo", 3, 6, 3},
{"colocarla", 3, 5, 3, true, "colocar", 3, 5, 3},
{"pasarles", 3, 5, 3, true, "pasar", 3, 5, 3},
{"depositarse", 3, 5, 3, true, "depositar", 3, 5, 3},
{"tragarse", 4, 6, 3, true, "tragar", 4, 6, 3},
{"eliminarla", 2, 4, 3, true, "eliminar", 2, 4, 3},
{"eliminarse", 2, 4, 3, true, "eliminar", 2, 4, 3},
{"apegarnos", 2, 4, 3, true, "apegar", 2, 4, 3},
{"asociarse", 2, 4, 3, true, "asociar", 2, 4, 3},
{"cambiarlos", 3, 7, 3, true, "cambiar", 3, 7, 3},
{"envolviéndose", 2, 5, 4, true, "envolviendo", 2, 5, 4},
{"lograrse", 3, 6, 3, true, "lograr", 3, 6, 3},
{"mostrarse", 3, 7, 3, true, "mostrar", 3, 7, 3},
{"pasarle", 3, 5, 3, true, "pasar", 3, 5, 3},
{"enfrentándose", 2, 6, 5, true, "enfrentando", 2, 6, 5},
{"permitirse", 3, 6, 3, true, "permitir", 3, 6, 3},
{"sanearlas", 3, 6, 3, true, "sanear", 3, 6, 3},
{"refugiarse", 3, 5, 3, true, "refugiar", 3, 5, 3},
{"relacionarse", 3, 5, 3, true, "relacionar", 3, 5, 3},
{"sacarlo", 3, 5, 3, true, "sacar", 3, 5, 3},
{"organizarse", 2, 5, 4, true, "organizar", 2, 5, 4},
{"familiarizarse", 3, 5, 3, true, "familiarizar", 3, 5, 3},
{"decidirse", 3, 5, 3, true, "decidir", 3, 5, 3},
{"tomarle", 3, 5, 3, true, "tomar", 3, 5, 3},
{"volverlas", 3, 6, 3, true, "volver", 3, 6, 3},
{"efectuarse", 2, 4, 3, true, "efectuar", 2, 4, 3},
{"elegirse", 2, 4, 3, true, "elegir", 2, 4, 3},
{"establecerse", 2, 5, 4, true, "establecer", 2, 5, 4},
{"ponerles", 3, 5, 3, true, "poner", 3, 5, 3},
}
runStepTest(t, step0, testCases)
}
Fix incorrect test for region finding.
package spanish
import (
"github.com/kljensen/snowball/snowballword"
"testing"
)
// Test stopWords for things we know should be true
// or false.
//
func Test_stopWords(t *testing.T) {
	testCases := []struct {
		word   string
		result bool
	}{
		{"el", true},
		{"queso", false},
	}
	for _, testCase := range testCases {
		result := isStopWord(testCase.word)
		if result != testCase.result {
			t.Errorf("Expect isStopWord(\"%v\") = %v, but got %v",
				testCase.word, testCase.result, result,
			)
		}
	}
}
// Test_isLowerVowel verifies isLowerVowel on runes with known
// classifications: Spanish vowels (including accented forms) and a
// sample of consonants.
func Test_isLowerVowel(t *testing.T) {
	cases := []struct {
		chars string
		want  bool
	}{
		// These are all vowels.
		{"aeiouáéíóúü", true},
		// None of these are vowels.
		{"cbfqhkl", false},
	}
	for _, tc := range cases {
		for _, r := range tc.chars {
			if got := isLowerVowel(r); got != tc.want {
				t.Errorf("Expect isLowerVowel(\"%v\") = %v, but got %v",
					r, tc.want, got,
				)
			}
		}
	}
}
// Test isLowerVowel for things we know should be true
// or false.
//
func Test_findRegions(t *testing.T) {
testCases := []struct {
word string
r1start int
r2start int
rvstart int
}{
{"macho", 3, 5, 3},
{"olivia", 2, 4, 3},
{"trabajo", 4, 6, 3},
{"áureo", 3, 5, 3},
{"piñaolayas", 3, 6, 3},
{"terminales", 3, 6, 3},
{"durmió", 3, 6, 3},
{"cobija", 3, 5, 3},
{"anderson", 2, 5, 4},
{"cervezas", 3, 6, 3},
{"climáticas", 4, 6, 3},
{"expide", 2, 5, 4},
{"cenizas", 3, 5, 3},
{"maximiliano", 3, 5, 3},
{"específicos", 2, 5, 4},
{"menor", 3, 5, 3},
{"generis", 3, 5, 3},
{"casero", 3, 5, 3},
{"pululan", 3, 5, 3},
{"suscitado", 3, 6, 3},
{"pesadez", 3, 5, 3},
{"interno", 2, 5, 4},
{"agredido", 2, 5, 4},
{"desprendía", 3, 7, 3},
{"vistazo", 3, 6, 3},
{"frecuentan", 4, 7, 3},
{"noviembre", 3, 6, 3},
{"sintética", 3, 6, 3},
{"newagismo", 3, 5, 3},
{"eliseo", 2, 4, 3},
{"desbordado", 3, 6, 3},
{"dispongo", 3, 6, 3},
{"dilatar", 3, 5, 3},
{"xochitl", 3, 6, 3},
{"proporcionaba", 4, 6, 3},
{"pue", 3, 3, 3},
{"alpargatado", 2, 5, 4},
{"exigida", 2, 4, 3},
{"céntricas", 3, 7, 3},
{"prende", 4, 6, 3},
{"estructural", 2, 6, 5},
{"ilegalmente", 2, 4, 3},
{"freeport", 5, 7, 3},
{"sonrisas", 3, 6, 3},
{"cobró", 3, 5, 3},
{"dioses", 4, 6, 3},
{"consistieron", 3, 6, 3},
{"policiales", 3, 5, 3},
{"conciliador", 3, 6, 3},
{"fierro", 4, 6, 3},
{"aparadores", 2, 4, 3},
{"coreados", 3, 6, 3},
{"posición", 3, 5, 3},
{"adversidades", 2, 5, 4},
{"comprometido", 3, 7, 3},
{"aventuras", 2, 4, 3},
{"santiso", 3, 6, 3},
{"talentos", 3, 5, 3},
{"apreciar", 2, 5, 4},
{"sprints", 5, 7, 4},
{"zarco", 3, 5, 3},
{"concretos", 3, 7, 3},
{"gavica", 3, 5, 3},
{"suavemente", 4, 6, 3},
{"españolitos", 2, 5, 4},
{"grabará", 4, 6, 3},
{"entregados", 2, 6, 5},
{"gustaría", 3, 6, 3},
{"nickin", 3, 6, 3},
{"sogem", 3, 5, 3},
{"prohíbe", 4, 6, 3},
{"espinoso", 2, 5, 4},
{"atraviesan", 2, 5, 4},
{"bancomext", 3, 6, 3},
{"paraguay", 3, 5, 3},
{"amamos", 2, 4, 3},
{"consigna", 3, 6, 3},
{"funcionarios", 3, 7, 3},
{"marquis", 3, 7, 3},
{"desactivaron", 3, 5, 3},
{"concentrados", 3, 6, 3},
{"democratizante", 3, 5, 3},
{"afianzadora", 2, 5, 3},
{"homicidio", 3, 5, 3},
{"promovidos", 4, 6, 3},
{"maquiladora", 3, 6, 3},
{"bike", 3, 4, 3},
{"recuerdos", 3, 6, 3},
{"géneros", 3, 5, 3},
{"rechaza", 3, 6, 3},
{"sentarían", 3, 6, 3},
{"quererlo", 4, 6, 3},
{"sofisticado", 3, 5, 3},
{"miriam", 3, 6, 3},
{"echara", 2, 5, 4},
{"mico", 3, 4, 3},
{"enferma", 2, 5, 4},
{"reforzamiento", 3, 5, 3},
{"circunscrito", 3, 6, 3},
{"indiana", 2, 6, 4},
{"metrópoli", 3, 6, 3},
{"libreta", 3, 6, 3},
{"gonzalez", 3, 6, 3},
{"antidemocrática", 2, 5, 4},
}
for _, testCase := range testCases {
w := snowballword.New(testCase.word)
r1start, r2start, rvstart := findRegions(w)
if r1start != testCase.r1start || r2start != testCase.r2start || rvstart != testCase.rvstart {
t.Errorf("Expect findRegions(\"%v\") = %v, %v, %v, but got %v, %v, %v",
testCase.word, testCase.r1start, testCase.r2start, testCase.rvstart,
r1start, r2start, rvstart,
)
}
}
}
type stepFunc func(*snowballword.SnowballWord) bool
type stepTest struct {
WordIn string
R1start int
R2start int
RVstart int
Changed bool
WordOut string
R1startOut int
R2startOut int
RVstartOut int
}
// runStepTest applies the stemming-step function f to each case's word
// with its region markers (R1, R2, RV) preset, then verifies the
// returned changed-flag, the resulting word, and the updated region
// markers against the expected values.
func runStepTest(t *testing.T, f stepFunc, tcs []stepTest) {
	for _, testCase := range tcs {
		w := snowballword.New(testCase.WordIn)
		w.R1start = testCase.R1start
		w.R2start = testCase.R2start
		w.RVstart = testCase.RVstart
		retval := f(w)
		if retval != testCase.Changed || w.String() != testCase.WordOut || w.R1start != testCase.R1startOut || w.R2start != testCase.R2startOut || w.RVstart != testCase.RVstartOut {
			t.Errorf("Expected %v -> \"{%v, %v, %v, %v}\", but got \"{%v, %v, %v, %v}\"", testCase.WordIn, testCase.WordOut, testCase.R1startOut, testCase.R2startOut, testCase.RVstartOut, w.String(), w.R1start, w.R2start, w.RVstart)
		}
	}
}
// Test step0, the removal of pronoun suffixes.
//
func Test_step0(t *testing.T) {
testCases := []stepTest{
{"liberarlo", 3, 5, 3, true, "liberar", 3, 5, 3},
{"ejecutarse", 2, 4, 3, true, "ejecutar", 2, 4, 3},
{"convirtiéndolas", 3, 6, 3, true, "convirtiendo", 3, 6, 3},
{"perfeccionarlo", 3, 6, 3, true, "perfeccionar", 3, 6, 3},
{"formarlo", 3, 6, 3, true, "formar", 3, 6, 3},
{"negociarlo", 3, 5, 3, true, "negociar", 3, 5, 3},
{"dirigirla", 3, 5, 3, true, "dirigir", 3, 5, 3},
{"malograrlas", 3, 5, 3, true, "malograr", 3, 5, 3},
{"atacarlos", 2, 4, 3, true, "atacar", 2, 4, 3},
{"originarla", 2, 4, 3, true, "originar", 2, 4, 3},
{"ponerlos", 3, 5, 3, true, "poner", 3, 5, 3},
{"ubicándolo", 2, 4, 3, true, "ubicando", 2, 4, 3},
{"dejarme", 3, 5, 3, true, "dejar", 3, 5, 3},
{"regalarnos", 3, 5, 3, true, "regalar", 3, 5, 3},
{"resolverlas", 3, 5, 3, true, "resolver", 3, 5, 3},
{"esperarse", 2, 5, 4, true, "esperar", 2, 5, 4},
{"cuidarlo", 4, 6, 3, true, "cuidar", 4, 6, 3},
{"empezarlos", 2, 5, 4, true, "empezar", 2, 5, 4},
{"gastarla", 3, 6, 3, true, "gastar", 3, 6, 3},
{"levantarme", 3, 5, 3, true, "levantar", 3, 5, 3},
{"ausentarse", 3, 5, 3, true, "ausentar", 3, 5, 3},
{"colocándose", 3, 5, 3, true, "colocando", 3, 5, 3},
{"suponerse", 3, 5, 3, true, "suponer", 3, 5, 3},
{"someterlos", 3, 5, 3, true, "someter", 3, 5, 3},
{"criticarlos", 4, 6, 3, true, "criticar", 4, 6, 3},
{"consolidarlo", 3, 6, 3, true, "consolidar", 3, 6, 3},
{"globalizarse", 4, 6, 3, true, "globalizar", 4, 6, 3},
{"corregirla", 3, 6, 3, true, "corregir", 3, 6, 3},
{"aplicarle", 2, 5, 4, true, "aplicar", 2, 5, 4},
{"casarse", 3, 5, 3, true, "casar", 3, 5, 3},
{"costándole", 3, 6, 3, true, "costando", 3, 6, 3},
{"rescindirlo", 3, 6, 3, true, "rescindir", 3, 6, 3},
{"quitándole", 4, 6, 3, true, "quitando", 4, 6, 3},
{"conservarse", 3, 6, 3, true, "conservar", 3, 6, 3},
{"venderlo", 3, 6, 3, true, "vender", 3, 6, 3},
{"garantizarse", 3, 5, 3, true, "garantizar", 3, 5, 3},
{"disfrutarse", 3, 7, 3, true, "disfrutar", 3, 7, 3},
{"comunicarse", 3, 5, 3, true, "comunicar", 3, 5, 3},
{"propiciarse", 4, 6, 3, true, "propiciar", 4, 6, 3},
{"otorgarnos", 2, 4, 3, true, "otorgar", 2, 4, 3},
{"contorsionarse", 3, 6, 3, true, "contorsionar", 3, 6, 3},
{"motivarlas", 3, 5, 3, true, "motivar", 3, 5, 3},
{"congelarse", 3, 6, 3, true, "congelar", 3, 6, 3},
{"generandoles", 3, 5, 3, true, "generando", 3, 5, 3},
{"evitarlo", 2, 4, 3, true, "evitar", 2, 4, 3},
{"atenderlos", 2, 4, 3, true, "atender", 2, 4, 3},
{"apoyándola", 2, 4, 3, true, "apoyando", 2, 4, 3},
{"pasarse", 3, 5, 3, true, "pasar", 3, 5, 3},
{"escucharlos", 2, 5, 4, true, "escuchar", 2, 5, 4},
{"intervenirse", 2, 5, 4, true, "intervenir", 2, 5, 4},
{"contratarle", 3, 7, 3, true, "contratar", 3, 7, 3},
{"retirándose", 3, 5, 3, true, "retirando", 3, 5, 3},
{"quitarles", 4, 6, 3, true, "quitar", 4, 6, 3},
{"reforzarlas", 3, 5, 3, true, "reforzar", 3, 5, 3},
{"obtenerla", 2, 5, 4, true, "obtener", 2, 5, 4},
{"considerarlo", 3, 6, 3, true, "considerar", 3, 6, 3},
{"regresarse", 3, 6, 3, true, "regresar", 3, 6, 3},
{"ponerse", 3, 5, 3, true, "poner", 3, 5, 3},
{"llevándose", 4, 6, 3, true, "llevando", 4, 6, 3},
{"ocuparse", 2, 4, 3, true, "ocupar", 2, 4, 3},
{"aprovecharse", 2, 5, 4, true, "aprovechar", 2, 5, 4},
{"corregirlo", 3, 6, 3, true, "corregir", 3, 6, 3},
{"probarle", 4, 6, 3, true, "probar", 4, 6, 3},
{"comernos", 3, 5, 3, true, "comer", 3, 5, 3},
{"iniciarme", 2, 4, 3, true, "iniciar", 2, 4, 3},
{"concentrarse", 3, 6, 3, true, "concentrar", 3, 6, 3},
{"llevarse", 4, 6, 3, true, "llevar", 4, 6, 3},
{"difundirlo", 3, 5, 3, true, "difundir", 3, 5, 3},
{"basándose", 3, 5, 3, true, "basando", 3, 5, 3},
{"destinarlos", 3, 6, 3, true, "destinar", 3, 6, 3},
{"reubicarse", 4, 6, 3, true, "reubicar", 4, 6, 3},
{"manteniéndose", 3, 6, 3, true, "manteniendo", 3, 6, 3},
{"colocarla", 3, 5, 3, true, "colocar", 3, 5, 3},
{"pasarles", 3, 5, 3, true, "pasar", 3, 5, 3},
{"depositarse", 3, 5, 3, true, "depositar", 3, 5, 3},
{"tragarse", 4, 6, 3, true, "tragar", 4, 6, 3},
{"eliminarla", 2, 4, 3, true, "eliminar", 2, 4, 3},
{"eliminarse", 2, 4, 3, true, "eliminar", 2, 4, 3},
{"apegarnos", 2, 4, 3, true, "apegar", 2, 4, 3},
{"asociarse", 2, 4, 3, true, "asociar", 2, 4, 3},
{"cambiarlos", 3, 7, 3, true, "cambiar", 3, 7, 3},
{"envolviéndose", 2, 5, 4, true, "envolviendo", 2, 5, 4},
{"lograrse", 3, 6, 3, true, "lograr", 3, 6, 3},
{"mostrarse", 3, 7, 3, true, "mostrar", 3, 7, 3},
{"pasarle", 3, 5, 3, true, "pasar", 3, 5, 3},
{"enfrentándose", 2, 6, 5, true, "enfrentando", 2, 6, 5},
{"permitirse", 3, 6, 3, true, "permitir", 3, 6, 3},
{"sanearlas", 3, 6, 3, true, "sanear", 3, 6, 3},
{"refugiarse", 3, 5, 3, true, "refugiar", 3, 5, 3},
{"relacionarse", 3, 5, 3, true, "relacionar", 3, 5, 3},
{"sacarlo", 3, 5, 3, true, "sacar", 3, 5, 3},
{"organizarse", 2, 5, 4, true, "organizar", 2, 5, 4},
{"familiarizarse", 3, 5, 3, true, "familiarizar", 3, 5, 3},
{"decidirse", 3, 5, 3, true, "decidir", 3, 5, 3},
{"tomarle", 3, 5, 3, true, "tomar", 3, 5, 3},
{"volverlas", 3, 6, 3, true, "volver", 3, 6, 3},
{"efectuarse", 2, 4, 3, true, "efectuar", 2, 4, 3},
{"elegirse", 2, 4, 3, true, "elegir", 2, 4, 3},
{"establecerse", 2, 5, 4, true, "establecer", 2, 5, 4},
{"ponerles", 3, 5, 3, true, "poner", 3, 5, 3},
}
runStepTest(t, step0, testCases)
}
|
package main
import (
"encoding/base64"
"github.com/realglobe-Inc/edo/util"
"github.com/realglobe-Inc/go-lib-rg/erro"
"math/big"
"math/rand"
"sync/atomic"
"time"
)
// idGenerator issues IDs built from a random part, a serial number, and
// a per-instance suffix.
type idGenerator struct {
	// Length of the random-character portion of each ID.
	randLen int
	// Serial number that guards against ID collisions within this instance.
	ser int64
	// String assigned per instance to guard against collisions between instances.
	suf string
}
// newIdGenerator returns an idGenerator whose serial number is seeded
// from the current time so separate runs start from different values.
func newIdGenerator(randLen int, suf string) idGenerator {
	return idGenerator{
		randLen: randLen,
		ser:     rand.New(rand.NewSource(time.Now().UnixNano())).Int63(),
		suf:     suf,
	}
}
// newId issues a new ID: a secure random string of this.randLen
// characters, followed by the incremented serial number encoded in
// URL-safe BASE64, followed by the instance suffix.
func (this *idGenerator) newId() (id string, err error) {
	id, err = util.SecureRandomString(this.randLen)
	if err != nil {
		return "", erro.Wrap(err)
	}
	const bLen = 64 / 8 // int64
	const sLen = bLen * 8 / 6 // Discard the high-order bits that cannot fill a whole BASE64 character.
	v := big.NewInt(atomic.AddInt64(&this.ser, 1))
	v = v.Lsh(v, bLen*8-sLen*6) // Align the lowest bit with the 6-bit BASE64 boundaries.
	buff := v.Bytes()
	if len(buff) < bLen {
		// Zero-pad the high-order bytes.
		buff = append(make([]byte, bLen-len(buff)), buff...)
	} else if len(buff) > bLen {
		// Discard the high-order bytes.
		buff = buff[len(buff)-bLen:]
	}
	id += base64.URLEncoding.EncodeToString(buff)[:sLen]
	return id + this.suf, nil
}
ID 生成器に乱数部分の長さを指定できるメソッドを追加
package main
import (
"encoding/base64"
"github.com/realglobe-Inc/edo/util"
"github.com/realglobe-Inc/go-lib-rg/erro"
"math/big"
"math/rand"
"sync/atomic"
"time"
)
// idGenerator issues IDs built from a random part, a serial number, and
// a per-instance suffix.
type idGenerator struct {
	// Length of the random-character portion of each ID.
	randLen int
	// Serial number that guards against ID collisions within this instance.
	ser int64
	// String assigned per instance to guard against collisions between instances.
	suf string
}
// newIdGenerator returns an idGenerator whose serial number is seeded
// from the current time so separate runs start from different values.
func newIdGenerator(randLen int, suf string) idGenerator {
	return idGenerator{
		randLen: randLen,
		ser:     rand.New(rand.NewSource(time.Now().UnixNano())).Int63(),
		suf:     suf,
	}
}
// newId issues an ID using the generator's default random-part length.
func (this *idGenerator) newId() (id string, err error) {
	return this.id(this.randLen)
}
// id issues an ID with the specified random-part length: a secure
// random string, followed by the incremented serial number encoded in
// URL-safe BASE64, followed by the instance suffix.
// If randLen is 0, err is always nil.
func (this *idGenerator) id(randLen int) (id string, err error) {
	id, err = util.SecureRandomString(randLen)
	if err != nil {
		return "", erro.Wrap(err)
	}
	const bLen = 64 / 8 // int64
	const sLen = bLen * 8 / 6 // Discard the high-order bits that cannot fill a whole BASE64 character.
	v := big.NewInt(atomic.AddInt64(&this.ser, 1))
	v = v.Lsh(v, bLen*8-sLen*6) // Align the lowest bit with the 6-bit BASE64 boundaries.
	buff := v.Bytes()
	if len(buff) < bLen {
		// Zero-pad the high-order bytes.
		buff = append(make([]byte, bLen-len(buff)), buff...)
	} else if len(buff) > bLen {
		// Discard the high-order bytes.
		buff = buff[len(buff)-bLen:]
	}
	id += base64.URLEncoding.EncodeToString(buff)[:sLen]
	return id + this.suf, nil
}
|
// Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
const (
projectVendor = `github.com/aws/amazon-ecs-agent/agent/vendor`
copyrightHeaderFormat = "// Copyright 2015-%v Amazon.com, Inc. or its affiliates. All Rights Reserved."
licenseBlock = `
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
`
)
// main expects exactly three arguments (package, interface names,
// output path), generates the mocks, and exits non-zero on failure.
func main() {
	if len(os.Args) != 4 {
		usage()
		os.Exit(1)
	}
	packageName := os.Args[1]
	interfaces := os.Args[2]
	outputPath := os.Args[3]
	err := generateMocks(packageName, interfaces, outputPath)
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if ok {
			// Prevents swallowing CLI program errors that come
			// from running mockgen and goimports
			// https://golang.org/pkg/os/exec/#ExitError
			fmt.Fprintln(os.Stderr, string(exitErr.Stderr))
		}
		// Print the encapsulating golang error type
		fmt.Println(err)
		os.Exit(1)
	}
}
// generateMocks runs mockgen for the given interfaces, strips the
// project vendor prefix from the generated output, prepends the
// copyright/license header, and writes the goimports-formatted result
// to outputPath.
func generateMocks(packageName string, interfaces string, outputPath string) error {
	copyrightHeader := fmt.Sprintf(copyrightHeaderFormat, time.Now().Year())
	path, _ := filepath.Split(outputPath)
	if err := os.MkdirAll(path, os.ModeDir|0755); err != nil {
		return err
	}
	outputFile, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	// Defer only after the error check; the original deferred before it,
	// scheduling Close on a nil *os.File when Create fails.
	defer outputFile.Close()
	mockgen := exec.Command("mockgen", packageName, interfaces)
	mockgenOut, err := mockgen.Output()
	if err != nil {
		return err
	}
	// Drop the vendor prefix so generated imports resolve normally.
	sanitized := strings.Replace(string(mockgenOut), projectVendor, "", -1)
	withHeader := copyrightHeader + licenseBlock + sanitized
	goimports := exec.Command("goimports")
	goimports.Stdin = bytes.NewBufferString(withHeader)
	goimports.Stdout = outputFile
	return goimports.Run()
}
// usage prints the command-line synopsis.
func usage() {
	// fmt.Println inserts a space between operands, so the literal must
	// not begin with one (the old form printed a double space).
	fmt.Println(os.Args[0], "PACKAGE INTERFACE_NAMES OUTPUT_FILE")
}
Update mockgen.go import removal pattern
Previously, it would remove the entire import line and depend on
goimports to correct the issue. goimports + mockgen doesn't always
play well with vendored dependencies.
New behavior only removes the vendor directory prefix, but still
runs goimport to validate / organize.
// Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
const (
projectVendor = `github.com/aws/amazon-ecs-agent/agent/vendor/`
copyrightHeaderFormat = "// Copyright 2015-%v Amazon.com, Inc. or its affiliates. All Rights Reserved."
licenseBlock = `
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
`
)
// main expects exactly three arguments (package, interface names,
// output path), generates the mocks, and exits non-zero on failure.
func main() {
	if len(os.Args) != 4 {
		usage()
		os.Exit(1)
	}
	packageName := os.Args[1]
	interfaces := os.Args[2]
	outputPath := os.Args[3]
	err := generateMocks(packageName, interfaces, outputPath)
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if ok {
			// Prevents swallowing CLI program errors that come
			// from running mockgen and goimports
			// https://golang.org/pkg/os/exec/#ExitError
			fmt.Fprintln(os.Stderr, string(exitErr.Stderr))
		}
		// Print the encapsulating golang error type
		fmt.Println(err)
		os.Exit(1)
	}
}
// generateMocks runs mockgen for the given interfaces, strips the
// project vendor directory prefix from generated import paths, prepends
// the copyright/license header, and writes the goimports-formatted
// result to outputPath.
func generateMocks(packageName string, interfaces string, outputPath string) error {
	copyrightHeader := fmt.Sprintf(copyrightHeaderFormat, time.Now().Year())
	path, _ := filepath.Split(outputPath)
	if err := os.MkdirAll(path, os.ModeDir|0755); err != nil {
		return err
	}
	outputFile, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	// Defer only after the error check; the original deferred before it,
	// scheduling Close on a nil *os.File when Create fails.
	defer outputFile.Close()
	mockgen := exec.Command("mockgen", packageName, interfaces)
	mockgenOut, err := mockgen.Output()
	if err != nil {
		return err
	}
	// Remove only the vendor prefix (projectVendor ends with "/") so the
	// import lines survive for goimports to validate and organize.
	sanitized := strings.Replace(string(mockgenOut), projectVendor, "", -1)
	withHeader := copyrightHeader + licenseBlock + sanitized
	goimports := exec.Command("goimports")
	goimports.Stdin = bytes.NewBufferString(withHeader)
	goimports.Stdout = outputFile
	return goimports.Run()
}
// usage prints the command-line synopsis.
func usage() {
	// fmt.Println inserts a space between operands, so the literal must
	// not begin with one (the old form printed a double space).
	fmt.Println(os.Args[0], "PACKAGE INTERFACE_NAMES OUTPUT_FILE")
}
|
package web
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/gorilla/mux"
"github.com/zenoss/glog"
"github.com/zenoss/go-json-rest"
"github.com/zenoss/serviced"
"github.com/zenoss/serviced/dao"
"mime"
"net/http"
"net/http/httputil"
"net/url"
)
// ServiceConfig holds the settings for the control plane web service.
type ServiceConfig struct {
	bindPort    string   // port this server binds to — TODO confirm exact usage
	agentPort   string   // agent endpoint; defaulted to "127.0.0.1:4979" when empty
	zookeepers  []string // zookeeper addresses; defaulted to {"127.0.0.1:2181"} when empty
	stats       bool     // presumably enables stats collection — verify against callers
	hostaliases []string // host aliases parsed from a colon-separated string
}
// NewServiceConfig builds a ServiceConfig from the supplied settings.
// hostaliases is a colon-separated list; an empty agentPort or an empty
// zookeeper list falls back to local defaults.
func NewServiceConfig(bindPort string, agentPort string, zookeepers []string, stats bool, hostaliases string) *ServiceConfig {
	cfg := &ServiceConfig{
		bindPort:    bindPort,
		agentPort:   agentPort,
		zookeepers:  zookeepers,
		stats:       stats,
		hostaliases: []string{},
	}
	if hostaliases != "" {
		cfg.hostaliases = strings.Split(hostaliases, ":")
	}
	if cfg.agentPort == "" {
		cfg.agentPort = "127.0.0.1:4979"
	}
	if len(cfg.zookeepers) == 0 {
		cfg.zookeepers = []string{"127.0.0.1:2181"}
	}
	return cfg
}
// Serve handles control plane web UI requests and virtual host requests for zenoss web based services.
// The UI server actually listens on port 7878, the uihandler defined here just reverse proxies to it.
// Virtual host routing to zenoss web based services is done by the vhosthandler function.
func (sc *ServiceConfig) Serve() {
	client, err := sc.getClient()
	if err != nil {
		glog.Errorf("Unable to get control plane client: %v", err)
		return
	}
	// Reverse proxy to the web UI server.
	uihandler := func(w http.ResponseWriter, r *http.Request) {
		uiUrl, err := url.Parse("http://127.0.0.1:7878")
		if err != nil {
			glog.Errorf("Can't parse UI URL: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// BUG FIX: NewSingleHostReverseProxy never returns nil, so the
		// old `if ui == nil` branch was dead code that also logged an
		// always-nil err; it has been removed.
		ui := httputil.NewSingleHostReverseProxy(uiUrl)
		ui.ServeHTTP(w, r)
	}
	// Lookup the appropriate virtual host and forward the request to it.
	// TODO: when zookeeper registration is integrated we can be more event
	// driven and only refresh the vhost map when service states change.
	vhosthandler := func(w http.ResponseWriter, r *http.Request) {
		var empty interface{}
		services := []*dao.RunningService{}
		// NOTE(review): the error from GetRunningServices is ignored;
		// confirm whether a lookup failure should 500 here instead of
		// proceeding with an empty service list.
		client.GetRunningServices(&empty, &services)
		// Map each registered virtual host name to the states serving it.
		vhosts := make(map[string][]*dao.ServiceState, 0)
		for _, s := range services {
			var svc dao.Service
			if err := client.GetService(s.ServiceId, &svc); err != nil {
				glog.Errorf("Can't get service: %s (%v)", s.Id, err)
			}
			vheps := svc.GetServiceVHosts()
			for _, vhep := range vheps {
				for _, vh := range vhep.VHosts {
					svcstates := []*dao.ServiceState{}
					if err := client.GetServiceStates(s.ServiceId, &svcstates); err != nil {
						http.Error(w, fmt.Sprintf("can't retrieve service states for %s (%v)", s.ServiceId, err), http.StatusInternalServerError)
						return
					}
					for _, ss := range svcstates {
						vhosts[vh] = append(vhosts[vh], ss)
					}
				}
			}
		}
		muxvars := mux.Vars(r)
		svcstates, ok := vhosts[muxvars["subdomain"]]
		if !ok {
			http.Error(w, fmt.Sprintf("unknown vhost: %v", muxvars["subdomain"]), http.StatusNotFound)
			return
		}
		// TODO: implement a more intelligent strategy than "always pick the first one" when more
		// than one service state is mapped to a given virtual host
		for _, svcep := range svcstates[0].Endpoints {
			for _, vh := range svcep.VHosts {
				if vh == muxvars["subdomain"] {
					rp := httputil.NewSingleHostReverseProxy(&url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", svcstates[0].PrivateIp, svcep.PortNumber)})
					rp.ServeHTTP(w, r)
					return
				}
			}
		}
		http.Error(w, fmt.Sprintf("unrecognized endpoint: %s", muxvars["subdomain"]), http.StatusNotImplemented)
	}
	r := mux.NewRouter()
	// Route vhost traffic for every known alias of this host, including
	// the kernel hostname and the FQDN reported by `hostname --fqdn`.
	if hnm, err := os.Hostname(); err == nil {
		sc.hostaliases = append(sc.hostaliases, hnm)
	}
	cmd := exec.Command("hostname", "--fqdn")
	if hnm, err := cmd.CombinedOutput(); err == nil {
		// Trim the trailing newline from the command output.
		sc.hostaliases = append(sc.hostaliases, string(hnm[:len(hnm)-1]))
	}
	for _, ha := range sc.hostaliases {
		r.HandleFunc("/", vhosthandler).Host(fmt.Sprintf("{subdomain}.%s", ha))
		r.HandleFunc("/{path:.*}", vhosthandler).Host(fmt.Sprintf("{subdomain}.%s", ha))
	}
	// Everything that is not a vhost request goes to the UI proxy.
	r.HandleFunc("/{path:.*}", uihandler)
	http.Handle("/", r)
	certfile, err := serviced.TempCertFile()
	if err != nil {
		glog.Error("Could not prepare cert.pem file.")
	}
	keyfile, err := serviced.TempKeyFile()
	if err != nil {
		glog.Error("Could not prepare key.pem file.")
	}
	// BUG FIX: the (always non-nil on return) error from
	// ListenAndServeTLS was discarded, so a failure to bind — e.g. port
	// already in use — left the process idle with no diagnostic.
	if err := http.ListenAndServeTLS(sc.bindPort, certfile, keyfile, nil); err != nil {
		glog.Errorf("Web server failed: %v", err)
	}
}
// ServeUI runs the REST API and static-content server on port 7878; the
// TLS front end started by Serve reverse-proxies UI traffic here.
func (this *ServiceConfig) ServeUI() {
	// Register MIME types that may be missing from the host's table.
	mime.AddExtensionType(".json", "application/json")
	mime.AddExtensionType(".woff", "application/font-woff")
	handler := rest.ResourceHandler{
		EnableRelaxedContentType: true,
	}
	routes := []rest.Route{
		rest.Route{"GET", "/", MainPage},
		rest.Route{"GET", "/test", TestPage},
		rest.Route{"GET", "/stats", this.IsCollectingStats()},
		// Hosts
		rest.Route{"GET", "/hosts", this.AuthorizedClient(RestGetHosts)},
		rest.Route{"POST", "/hosts/add", this.AuthorizedClient(RestAddHost)},
		rest.Route{"DELETE", "/hosts/:hostId", this.AuthorizedClient(RestRemoveHost)},
		rest.Route{"PUT", "/hosts/:hostId", this.AuthorizedClient(RestUpdateHost)},
		rest.Route{"GET", "/hosts/:hostId/running", this.AuthorizedClient(RestGetRunningForHost)},
		rest.Route{"DELETE", "/hosts/:hostId/:serviceStateId", this.AuthorizedClient(RestKillRunning)},
		// Pools
		rest.Route{"POST", "/pools/add", this.AuthorizedClient(RestAddPool)},
		rest.Route{"GET", "/pools/:poolId/hosts", this.AuthorizedClient(RestGetHostsForResourcePool)},
		rest.Route{"DELETE", "/pools/:poolId", this.AuthorizedClient(RestRemovePool)},
		rest.Route{"PUT", "/pools/:poolId", this.AuthorizedClient(RestUpdatePool)},
		rest.Route{"GET", "/pools", this.AuthorizedClient(RestGetPools)},
		// Services (Apps)
		rest.Route{"GET", "/services", this.AuthorizedClient(RestGetAllServices)},
		rest.Route{"GET", "/services/:serviceId", this.AuthorizedClient(RestGetService)},
		rest.Route{"GET", "/services/:serviceId/running", this.AuthorizedClient(RestGetRunningForService)},
		rest.Route{"GET", "/services/:serviceId/running/:serviceStateId", this.AuthorizedClient(RestGetRunningService)},
		rest.Route{"GET", "/services/:serviceId/:serviceStateId/logs", this.AuthorizedClient(RestGetServiceStateLogs)},
		rest.Route{"POST", "/services/add", this.AuthorizedClient(RestAddService)},
		rest.Route{"DELETE", "/services/:serviceId", this.AuthorizedClient(RestRemoveService)},
		rest.Route{"GET", "/services/:serviceId/logs", this.AuthorizedClient(RestGetServiceLogs)},
		rest.Route{"PUT", "/services/:serviceId", this.AuthorizedClient(RestUpdateService)},
		rest.Route{"GET", "/services/:serviceId/snapshot", this.AuthorizedClient(RestSnapshotService)},
		rest.Route{"PUT", "/services/:serviceId/startService", this.AuthorizedClient(RestStartService)},
		rest.Route{"PUT", "/services/:serviceId/stopService", this.AuthorizedClient(RestStopService)},
		// Services (Virtual Host)
		rest.Route{"GET", "/vhosts", this.AuthorizedClient(RestGetVirtualHosts)},
		rest.Route{"POST", "/vhosts/:serviceId/:application/:vhostName", this.AuthorizedClient(RestAddVirtualHost)},
		rest.Route{"DELETE", "/vhosts/:serviceId/:application/:vhostName", this.AuthorizedClient(RestRemoveVirtualHost)},
		// Service templates (App templates)
		rest.Route{"GET", "/templates", this.AuthorizedClient(RestGetAppTemplates)},
		rest.Route{"POST", "/templates/deploy", this.AuthorizedClient(RestDeployAppTemplate)},
		// Login
		rest.Route{"POST", "/login", this.UnAuthorizedClient(RestLogin)},
		rest.Route{"DELETE", "/login", RestLogout},
		// "Misc" stuff
		rest.Route{"GET", "/top/services", this.AuthorizedClient(RestGetTopServices)},
		rest.Route{"GET", "/running", this.AuthorizedClient(RestGetAllRunning)},
		// Generic static data
		rest.Route{"GET", "/favicon.ico", FavIcon},
		rest.Route{"GET", "/static*resource", StaticData},
	}
	// Hardcoding these target URLs for now.
	// TODO: When internal services are allowed to run on other hosts, look that up.
	// BUG FIX: a bare "/elastic" proxy duplicated the canonical
	// "/api/controlplane/elastic" route to the same backend; the
	// redundant registration is removed (matching the later revision of
	// this file).
	routes = routeToInternalServiceProxy("/api/controlplane/elastic", "http://127.0.0.1:9200/", routes)
	routes = routeToInternalServiceProxy("/metrics", "http://127.0.0.1:8888/", routes)
	handler.SetRoutes(routes...)
	// BUG FIX: the listen error used to be silently discarded.
	if err := http.ListenAndServe(":7878", &handler); err != nil {
		glog.Errorf("UI server failed: %v", err)
	}
}
// methods lists the HTTP verbs registered for each internal proxy route.
var methods []string = []string{"GET", "POST", "PUT", "DELETE"}
// routeToInternalServiceProxy appends REST routes that reverse-proxy
// path (and any subpath, via the "*x" glob) to the internal service at
// target, one route per verb in methods. If target cannot be parsed the
// routes are returned unchanged after logging.
func routeToInternalServiceProxy(path string, target string, routes []rest.Route) []rest.Route {
	targetUrl, err := url.Parse(target)
	if err != nil {
		glog.Errorf("Unable to parse proxy target URL: %s", target)
		return routes
	}
	// Wrap the normal http.Handler in a rest.HandlerFunc
	handlerFunc := func(w *rest.ResponseWriter, r *rest.Request) {
		proxy := serviced.NewReverseProxy(path, targetUrl)
		proxy.ServeHTTP(w.ResponseWriter, r.Request)
	}
	// Add on a glob to match subpaths
	andsubpath := path + "*x"
	for _, method := range methods {
		routes = append(routes, rest.Route{method, path, handlerFunc})
		routes = append(routes, rest.Route{method, andsubpath, handlerFunc})
	}
	return routes
}
// UnAuthorizedClient wraps realfunc with a control plane client but no
// login check — used for endpoints (e.g. POST /login) that must work
// before authentication.
func (this *ServiceConfig) UnAuthorizedClient(realfunc HandlerClientFunc) HandlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		client, err := this.getClient()
		if err != nil {
			glog.Errorf("Unable to acquire client: %v", err)
			RestServerError(w)
			return
		}
		// Release the client once the wrapped handler returns.
		defer client.Close()
		realfunc(w, r, client)
	}
}
// AuthorizedClient wraps realfunc with a login check and a control plane
// client. Unauthenticated requests are rejected before any client is
// acquired.
func (this *ServiceConfig) AuthorizedClient(realfunc HandlerClientFunc) HandlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		if !LoginOk(r) {
			RestUnauthorized(w)
			return
		}
		client, err := this.getClient()
		if err != nil {
			glog.Errorf("Unable to acquire client: %v", err)
			RestServerError(w)
			return
		}
		// Release the client once the wrapped handler returns.
		defer client.Close()
		realfunc(w, r, client)
	}
}
// IsCollectingStats returns a handler whose HTTP status reports whether
// stats collection is enabled: 200 when it is, 501 when it is not.
func (this *ServiceConfig) IsCollectingStats() HandlerFunc {
	if this.stats {
		return func(w *rest.ResponseWriter, r *rest.Request) {
			w.WriteHeader(http.StatusOK)
		}
	}
	return func(w *rest.ResponseWriter, r *rest.Request) {
		w.WriteHeader(http.StatusNotImplemented)
	}
}
// getClient connects to the control plane agent at agentPort and
// returns a client for it; on failure the error is returned to the
// caller.
func (this *ServiceConfig) getClient() (c *serviced.ControlClient, err error) {
	// setup the client
	c, err = serviced.NewControlClient(this.agentPort)
	if err != nil {
		// BUG FIX: this used glog.Fatalf, which kills the whole process
		// and made the error return value — and every caller's error
		// handling (RestServerError, etc.) — unreachable dead code.
		glog.Errorf("Could not create a control plane client: %v", err)
	}
	return c, err
}
remove redundant route
package web
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/gorilla/mux"
"github.com/zenoss/glog"
"github.com/zenoss/go-json-rest"
"github.com/zenoss/serviced"
"github.com/zenoss/serviced/dao"
"mime"
"net/http"
"net/http/httputil"
"net/url"
)
// ServiceConfig holds the runtime configuration for the control plane
// web server.
type ServiceConfig struct {
	bindPort    string   // address the TLS front-end server listens on
	agentPort   string   // control plane agent address (host:port)
	zookeepers  []string // zookeeper ensemble addresses
	stats       bool     // whether stats collection is enabled
	hostaliases []string // hostnames treated as vhost base domains
}
// NewServiceConfig builds a ServiceConfig from the supplied settings.
// hostaliases is a colon-separated list; an empty agentPort or empty
// zookeeper list falls back to local defaults.
func NewServiceConfig(bindPort string, agentPort string, zookeepers []string, stats bool, hostaliases string) *ServiceConfig {
	cfg := ServiceConfig{bindPort, agentPort, zookeepers, stats, []string{}}
	if hostaliases != "" {
		cfg.hostaliases = strings.Split(hostaliases, ":")
	}
	if len(cfg.agentPort) == 0 {
		cfg.agentPort = "127.0.0.1:4979"
	}
	if len(cfg.zookeepers) == 0 {
		cfg.zookeepers = []string{"127.0.0.1:2181"}
	}
	return &cfg
}
// Serve handles control plane web UI requests and virtual host requests for zenoss web based services.
// The UI server actually listens on port 7878, the uihandler defined here just reverse proxies to it.
// Virtual host routing to zenoss web based services is done by the vhosthandler function.
func (sc *ServiceConfig) Serve() {
	client, err := sc.getClient()
	if err != nil {
		glog.Errorf("Unable to get control plane client: %v", err)
		return
	}
	// Reverse proxy to the web UI server.
	uihandler := func(w http.ResponseWriter, r *http.Request) {
		uiUrl, err := url.Parse("http://127.0.0.1:7878")
		if err != nil {
			glog.Errorf("Can't parse UI URL: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		ui := httputil.NewSingleHostReverseProxy(uiUrl)
		// NOTE(review): NewSingleHostReverseProxy never returns nil, so
		// this branch is dead code — and the err it logs is always nil
		// here. Consider removing it.
		if ui == nil {
			glog.Errorf("Can't proxy UI request: %v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		ui.ServeHTTP(w, r)
	}
	// Lookup the appropriate virtual host and forward the request to it.
	// TODO: when zookeeper registration is integrated we can be more event
	// driven and only refresh the vhost map when service states change.
	vhosthandler := func(w http.ResponseWriter, r *http.Request) {
		var empty interface{}
		services := []*dao.RunningService{}
		// NOTE(review): the error from GetRunningServices is ignored;
		// confirm whether a lookup failure should 500 here.
		client.GetRunningServices(&empty, &services)
		// Map each registered virtual host name to the states serving it.
		vhosts := make(map[string][]*dao.ServiceState, 0)
		for _, s := range services {
			var svc dao.Service
			if err := client.GetService(s.ServiceId, &svc); err != nil {
				glog.Errorf("Can't get service: %s (%v)", s.Id, err)
			}
			vheps := svc.GetServiceVHosts()
			for _, vhep := range vheps {
				for _, vh := range vhep.VHosts {
					svcstates := []*dao.ServiceState{}
					if err := client.GetServiceStates(s.ServiceId, &svcstates); err != nil {
						http.Error(w, fmt.Sprintf("can't retrieve service states for %s (%v)", s.ServiceId, err), http.StatusInternalServerError)
						return
					}
					for _, ss := range svcstates {
						vhosts[vh] = append(vhosts[vh], ss)
					}
				}
			}
		}
		muxvars := mux.Vars(r)
		svcstates, ok := vhosts[muxvars["subdomain"]]
		if !ok {
			http.Error(w, fmt.Sprintf("unknown vhost: %v", muxvars["subdomain"]), http.StatusNotFound)
			return
		}
		// TODO: implement a more intelligent strategy than "always pick the first one" when more
		// than one service state is mapped to a given virtual host
		for _, svcep := range svcstates[0].Endpoints {
			for _, vh := range svcep.VHosts {
				if vh == muxvars["subdomain"] {
					rp := httputil.NewSingleHostReverseProxy(&url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", svcstates[0].PrivateIp, svcep.PortNumber)})
					rp.ServeHTTP(w, r)
					return
				}
			}
		}
		http.Error(w, fmt.Sprintf("unrecognized endpoint: %s", muxvars["subdomain"]), http.StatusNotImplemented)
	}
	r := mux.NewRouter()
	// Route vhost traffic for every known alias of this host, including
	// the kernel hostname and the FQDN reported by `hostname --fqdn`.
	if hnm, err := os.Hostname(); err == nil {
		sc.hostaliases = append(sc.hostaliases, hnm)
	}
	cmd := exec.Command("hostname", "--fqdn")
	if hnm, err := cmd.CombinedOutput(); err == nil {
		// Trims the trailing newline from the command output.
		sc.hostaliases = append(sc.hostaliases, string(hnm[:len(hnm)-1]))
	}
	for _, ha := range sc.hostaliases {
		r.HandleFunc("/", vhosthandler).Host(fmt.Sprintf("{subdomain}.%s", ha))
		r.HandleFunc("/{path:.*}", vhosthandler).Host(fmt.Sprintf("{subdomain}.%s", ha))
	}
	// Everything that is not a vhost request goes to the UI proxy.
	r.HandleFunc("/{path:.*}", uihandler)
	http.Handle("/", r)
	certfile, err := serviced.TempCertFile()
	if err != nil {
		glog.Error("Could not prepare cert.pem file.")
	}
	keyfile, err := serviced.TempKeyFile()
	if err != nil {
		glog.Error("Could not prepare key.pem file.")
	}
	// NOTE(review): the error from ListenAndServeTLS is discarded, so a
	// failed bind leaves the process idle with no diagnostic — consider
	// logging it.
	http.ListenAndServeTLS(sc.bindPort, certfile, keyfile, nil)
}
// ServeUI runs the REST API and static-content server on port 7878; the
// TLS front end started by Serve reverse-proxies UI traffic here.
func (this *ServiceConfig) ServeUI() {
	// Register MIME types that may be missing from the host's table.
	mime.AddExtensionType(".json", "application/json")
	mime.AddExtensionType(".woff", "application/font-woff")
	handler := rest.ResourceHandler{
		EnableRelaxedContentType: true,
	}
	routes := []rest.Route{
		rest.Route{"GET", "/", MainPage},
		rest.Route{"GET", "/test", TestPage},
		rest.Route{"GET", "/stats", this.IsCollectingStats()},
		// Hosts
		rest.Route{"GET", "/hosts", this.AuthorizedClient(RestGetHosts)},
		rest.Route{"POST", "/hosts/add", this.AuthorizedClient(RestAddHost)},
		rest.Route{"DELETE", "/hosts/:hostId", this.AuthorizedClient(RestRemoveHost)},
		rest.Route{"PUT", "/hosts/:hostId", this.AuthorizedClient(RestUpdateHost)},
		rest.Route{"GET", "/hosts/:hostId/running", this.AuthorizedClient(RestGetRunningForHost)},
		rest.Route{"DELETE", "/hosts/:hostId/:serviceStateId", this.AuthorizedClient(RestKillRunning)},
		// Pools
		rest.Route{"POST", "/pools/add", this.AuthorizedClient(RestAddPool)},
		rest.Route{"GET", "/pools/:poolId/hosts", this.AuthorizedClient(RestGetHostsForResourcePool)},
		rest.Route{"DELETE", "/pools/:poolId", this.AuthorizedClient(RestRemovePool)},
		rest.Route{"PUT", "/pools/:poolId", this.AuthorizedClient(RestUpdatePool)},
		rest.Route{"GET", "/pools", this.AuthorizedClient(RestGetPools)},
		// Services (Apps)
		rest.Route{"GET", "/services", this.AuthorizedClient(RestGetAllServices)},
		rest.Route{"GET", "/services/:serviceId", this.AuthorizedClient(RestGetService)},
		rest.Route{"GET", "/services/:serviceId/running", this.AuthorizedClient(RestGetRunningForService)},
		rest.Route{"GET", "/services/:serviceId/running/:serviceStateId", this.AuthorizedClient(RestGetRunningService)},
		rest.Route{"GET", "/services/:serviceId/:serviceStateId/logs", this.AuthorizedClient(RestGetServiceStateLogs)},
		rest.Route{"POST", "/services/add", this.AuthorizedClient(RestAddService)},
		rest.Route{"DELETE", "/services/:serviceId", this.AuthorizedClient(RestRemoveService)},
		rest.Route{"GET", "/services/:serviceId/logs", this.AuthorizedClient(RestGetServiceLogs)},
		rest.Route{"PUT", "/services/:serviceId", this.AuthorizedClient(RestUpdateService)},
		rest.Route{"GET", "/services/:serviceId/snapshot", this.AuthorizedClient(RestSnapshotService)},
		rest.Route{"PUT", "/services/:serviceId/startService", this.AuthorizedClient(RestStartService)},
		rest.Route{"PUT", "/services/:serviceId/stopService", this.AuthorizedClient(RestStopService)},
		// Services (Virtual Host)
		rest.Route{"GET", "/vhosts", this.AuthorizedClient(RestGetVirtualHosts)},
		rest.Route{"POST", "/vhosts/:serviceId/:application/:vhostName", this.AuthorizedClient(RestAddVirtualHost)},
		rest.Route{"DELETE", "/vhosts/:serviceId/:application/:vhostName", this.AuthorizedClient(RestRemoveVirtualHost)},
		// Service templates (App templates)
		rest.Route{"GET", "/templates", this.AuthorizedClient(RestGetAppTemplates)},
		rest.Route{"POST", "/templates/deploy", this.AuthorizedClient(RestDeployAppTemplate)},
		// Login
		rest.Route{"POST", "/login", this.UnAuthorizedClient(RestLogin)},
		rest.Route{"DELETE", "/login", RestLogout},
		// "Misc" stuff
		rest.Route{"GET", "/top/services", this.AuthorizedClient(RestGetTopServices)},
		rest.Route{"GET", "/running", this.AuthorizedClient(RestGetAllRunning)},
		// Generic static data
		rest.Route{"GET", "/favicon.ico", FavIcon},
		rest.Route{"GET", "/static*resource", StaticData},
	}
	// Hardcoding these target URLs for now.
	// TODO: When internal services are allowed to run on other hosts, look that up.
	routes = routeToInternalServiceProxy("/api/controlplane/elastic", "http://127.0.0.1:9200/", routes)
	routes = routeToInternalServiceProxy("/metrics", "http://127.0.0.1:8888/", routes)
	handler.SetRoutes(routes...)
	// NOTE(review): the listen error is discarded — consider logging it.
	http.ListenAndServe(":7878", &handler)
}
// methods lists the HTTP verbs registered for each internal proxy route.
var methods []string = []string{"GET", "POST", "PUT", "DELETE"}
// routeToInternalServiceProxy appends REST routes that reverse-proxy
// path (and any subpath, via the "*x" glob) to the internal service at
// target, one route per verb in methods. If target cannot be parsed the
// routes are returned unchanged after logging.
func routeToInternalServiceProxy(path string, target string, routes []rest.Route) []rest.Route {
	targetUrl, err := url.Parse(target)
	if err != nil {
		glog.Errorf("Unable to parse proxy target URL: %s", target)
		return routes
	}
	// Wrap the normal http.Handler in a rest.HandlerFunc
	handlerFunc := func(w *rest.ResponseWriter, r *rest.Request) {
		proxy := serviced.NewReverseProxy(path, targetUrl)
		proxy.ServeHTTP(w.ResponseWriter, r.Request)
	}
	// Add on a glob to match subpaths
	andsubpath := path + "*x"
	for _, method := range methods {
		routes = append(routes, rest.Route{method, path, handlerFunc})
		routes = append(routes, rest.Route{method, andsubpath, handlerFunc})
	}
	return routes
}
// UnAuthorizedClient wraps realfunc with a control plane client but no
// login check — used for endpoints (e.g. POST /login) that must work
// before authentication.
func (this *ServiceConfig) UnAuthorizedClient(realfunc HandlerClientFunc) HandlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		client, err := this.getClient()
		if err != nil {
			glog.Errorf("Unable to acquire client: %v", err)
			RestServerError(w)
			return
		}
		// Release the client once the wrapped handler returns.
		defer client.Close()
		realfunc(w, r, client)
	}
}
// AuthorizedClient wraps realfunc with a login check and a control plane
// client. Unauthenticated requests are rejected before any client is
// acquired.
func (this *ServiceConfig) AuthorizedClient(realfunc HandlerClientFunc) HandlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		if !LoginOk(r) {
			RestUnauthorized(w)
			return
		}
		client, err := this.getClient()
		if err != nil {
			glog.Errorf("Unable to acquire client: %v", err)
			RestServerError(w)
			return
		}
		// Release the client once the wrapped handler returns.
		defer client.Close()
		realfunc(w, r, client)
	}
}
// IsCollectingStats returns a handler whose HTTP status reports whether
// stats collection is enabled: 200 when it is, 501 when it is not.
func (this *ServiceConfig) IsCollectingStats() HandlerFunc {
	if this.stats {
		return func(w *rest.ResponseWriter, r *rest.Request) {
			w.WriteHeader(http.StatusOK)
		}
	} else {
		return func(w *rest.ResponseWriter, r *rest.Request) {
			w.WriteHeader(http.StatusNotImplemented)
		}
	}
}
// getClient connects to the control plane agent at agentPort.
// NOTE(review): glog.Fatalf exits the whole process on failure, which
// makes the error return value — and every caller's error handling —
// unreachable dead code. Confirm whether Errorf was intended.
func (this *ServiceConfig) getClient() (c *serviced.ControlClient, err error) {
	// setup the client
	c, err = serviced.NewControlClient(this.agentPort)
	if err != nil {
		glog.Fatalf("Could not create a control plane client: %v", err)
	}
	return c, err
}
|
package core
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSignatureBindExpandedPositionalArgument checks that an expanded
// positional argument (the `true` flag) can satisfy a single required
// parameter: Bind must return a nil error thunk.
func TestSignatureBindExpandedPositionalArgument(t *testing.T) {
	s := NewSignature([]string{"x"}, nil, "", nil, nil, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(NewList(Nil), true)}, nil, nil)
	_, err := s.Bind(args)
	// Bind signals failure through a non-nil *Thunk.
	assert.Equal(t, (*Thunk)(nil), err)
}
// TestSignatureBindNoArgumentFail checks that passing a positional
// argument to a signature declaring no parameters fails to bind.
func TestSignatureBindNoArgumentFail(t *testing.T) {
	s := NewSignature(nil, nil, "", nil, nil, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(Nil, false)}, nil, nil)
	_, err := s.Bind(args)
	assert.NotEqual(t, (*Thunk)(nil), err)
}
// TestSignatureBindRequiredKeywordArgumentFail checks that a required
// keyword parameter left unsatisfied (only a positional argument is
// given) makes Bind fail.
func TestSignatureBindRequiredKeywordArgumentFail(t *testing.T) {
	s := NewSignature(nil, nil, "", []string{"arg"}, nil, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(Nil, false)}, nil, nil)
	_, err := s.Bind(args)
	assert.NotEqual(t, (*Thunk)(nil), err)
}
// TestSignatureBindOptionalKeywordArgumentFail checks that a stray
// positional argument cannot bind against a signature with only an
// optional keyword parameter.
func TestSignatureBindOptionalKeywordArgumentFail(t *testing.T) {
	s := NewSignature(nil, nil, "", nil, []OptionalArgument{NewOptionalArgument("arg", Nil)}, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(Nil, false)}, nil, nil)
	_, err := s.Bind(args)
	assert.NotEqual(t, (*Thunk)(nil), err)
}
// TestSignatureBindExpandedDictionaries partially applies a 3-argument
// insert function with an expanded-dictionary argument supplying "key",
// then completes the call with the "value" keyword. It also re-applies
// the partial to verify the Arguments stored by Partial are persistent
// (not consumed by the first application).
func TestSignatureBindExpandedDictionaries(t *testing.T) {
	insert := NewLazyFunction(
		NewSignature(
			[]string{"collection", "key", "value"}, nil, "",
			nil, nil, "",
		),
		func(ts ...*Thunk) (result Value) {
			return PApp(Insert, ts...)
		})
	f := App(Partial, NewArguments(
		[]PositionalArgument{
			NewPositionalArgument(insert, false),
			NewPositionalArgument(EmptyDictionary, false),
		},
		nil,
		[]*Thunk{NewDictionary([]Value{NewString("key").Eval()}, []*Thunk{True})}))
	v := App(f, NewArguments(nil, []KeywordArgument{NewKeywordArgument("value", NewNumber(42))}, nil)).Eval()
	_, ok := v.(DictionaryType)
	assert.True(t, ok)
	// Check if the Arguments passed to Partial is persistent.
	v = App(f, NewArguments(nil, []KeywordArgument{NewKeywordArgument("value", NewNumber(42))}, nil)).Eval()
	_, ok = v.(DictionaryType)
	assert.True(t, ok)
}
Test Signature.Bind() with expanded dictionary argument
package core
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSignatureBindExpandedPositionalArgument checks that an expanded
// positional argument (the `true` flag) can satisfy a single required
// parameter: Bind must return a nil error thunk.
func TestSignatureBindExpandedPositionalArgument(t *testing.T) {
	s := NewSignature([]string{"x"}, nil, "", nil, nil, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(NewList(Nil), true)}, nil, nil)
	_, err := s.Bind(args)
	// Bind signals failure through a non-nil *Thunk.
	assert.Equal(t, (*Thunk)(nil), err)
}
// TestSignatureBindExpandedDictionary checks that a required keyword
// parameter ("foo") can be satisfied by an expanded dictionary argument
// whose keys carry the keyword values.
func TestSignatureBindExpandedDictionary(t *testing.T) {
	s := NewSignature(nil, nil, "", []string{"foo"}, nil, "")
	args := NewArguments(
		nil,
		nil,
		[]*Thunk{NewDictionary([]Value{NewString("foo").Eval()}, []*Thunk{Nil})})
	_, err := s.Bind(args)
	assert.Equal(t, (*Thunk)(nil), err)
}
// TestSignatureBindNoArgumentFail checks that passing a positional
// argument to a signature declaring no parameters fails to bind.
func TestSignatureBindNoArgumentFail(t *testing.T) {
	s := NewSignature(nil, nil, "", nil, nil, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(Nil, false)}, nil, nil)
	_, err := s.Bind(args)
	assert.NotEqual(t, (*Thunk)(nil), err)
}
// TestSignatureBindRequiredKeywordArgumentFail checks that a required
// keyword parameter left unsatisfied (only a positional argument is
// given) makes Bind fail.
func TestSignatureBindRequiredKeywordArgumentFail(t *testing.T) {
	s := NewSignature(nil, nil, "", []string{"arg"}, nil, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(Nil, false)}, nil, nil)
	_, err := s.Bind(args)
	assert.NotEqual(t, (*Thunk)(nil), err)
}
// TestSignatureBindOptionalKeywordArgumentFail checks that a stray
// positional argument cannot bind against a signature with only an
// optional keyword parameter.
func TestSignatureBindOptionalKeywordArgumentFail(t *testing.T) {
	s := NewSignature(nil, nil, "", nil, []OptionalArgument{NewOptionalArgument("arg", Nil)}, "")
	args := NewArguments([]PositionalArgument{NewPositionalArgument(Nil, false)}, nil, nil)
	_, err := s.Bind(args)
	assert.NotEqual(t, (*Thunk)(nil), err)
}
// TestSignatureBindExpandedDictionaries partially applies a 3-argument
// insert function with an expanded-dictionary argument supplying "key",
// then completes the call with the "value" keyword. It also re-applies
// the partial to verify the Arguments stored by Partial are persistent
// (not consumed by the first application).
func TestSignatureBindExpandedDictionaries(t *testing.T) {
	insert := NewLazyFunction(
		NewSignature(
			[]string{"collection", "key", "value"}, nil, "",
			nil, nil, "",
		),
		func(ts ...*Thunk) (result Value) {
			return PApp(Insert, ts...)
		})
	f := App(Partial, NewArguments(
		[]PositionalArgument{
			NewPositionalArgument(insert, false),
			NewPositionalArgument(EmptyDictionary, false),
		},
		nil,
		[]*Thunk{NewDictionary([]Value{NewString("key").Eval()}, []*Thunk{True})}))
	v := App(f, NewArguments(nil, []KeywordArgument{NewKeywordArgument("value", NewNumber(42))}, nil)).Eval()
	_, ok := v.(DictionaryType)
	assert.True(t, ok)
	// Check if the Arguments passed to Partial is persistent.
	v = App(f, NewArguments(nil, []KeywordArgument{NewKeywordArgument("value", NewNumber(42))}, nil)).Eval()
	_, ok = v.(DictionaryType)
	assert.True(t, ok)
}
|
package main
import (
"fmt"
"os"
"github.com/codegangsta/cli"
r "github.com/dancannon/gorethink"
"github.com/materials-commons/config"
"github.com/materials-commons/mcstore/pkg/db"
"github.com/materials-commons/mcstore/pkg/db/schema"
"github.com/materials-commons/mcstore/server/mcstore/pkg/search"
"github.com/materials-commons/mcstore/server/mcstore/pkg/search/doc"
"gopkg.in/olivere/elastic.v2"
)
// mappings is the static index definition sent to elasticsearch when the
// "mc" index is (re)created. ID-like fields are marked not_analyzed so
// they match verbatim instead of being tokenized.
var mappings string = `
{
    "mappings": {
        "files": {
            "properties": {
                "project_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "project": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "datadir_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "usesid": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "projects": {
            "properties": {
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "datadir": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "samples": {
            "properties":{
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "project_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "sample_id": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "processes": {
            "properties":{
                "process_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "project_id": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "users": {
            "properties":{
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        }
    }
}
`
// main wires up the mcbulk CLI: connection flags for elasticsearch and
// rethinkdb plus per-entity-type indexing toggles, dispatching to
// mcbulkCLI. The BoolT flags default to true, so every entity type is
// indexed unless explicitly disabled.
func main() {
	app := cli.NewApp()
	app.Version = "1.0.0"
	app.Authors = []cli.Author{
		{
			Name:  "V. Glenn Tarcea",
			Email: "gtarcea@umich.edu",
		},
	}
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name: "es-url",
			// NOTE(review): this default (port 9500) disagrees with the
			// esURL() fallback of 9200 — confirm which is intended.
			Value:  "http://localhost:9500",
			Usage:  "Elasticsearch server URL",
			EnvVar: "MC_ES_URL",
		},
		cli.StringFlag{
			Name:   "db-connection",
			Value:  "localhost:30815",
			Usage:  "RethinkDB connection string",
			EnvVar: "MCDB_CONNECTION",
		},
		cli.StringFlag{
			Name:   "db-name",
			Value:  "materialscommons",
			Usage:  "Database to index",
			EnvVar: "MCDB_NAME",
		},
		cli.StringFlag{
			Name:   "mc-dir",
			Value:  "/mcfs/data/test",
			Usage:  "Path to data directory",
			EnvVar: "MCDIR",
		},
		cli.BoolFlag{
			Name:  "create-index",
			Usage: "Whether the index should be recreated",
		},
		cli.BoolTFlag{
			Name:  "processes",
			Usage: "Index processes",
		},
		cli.BoolTFlag{
			Name:  "files",
			Usage: "Index files",
		},
		cli.BoolTFlag{
			Name:  "samples",
			Usage: "Index samples",
		},
		cli.BoolTFlag{
			Name:  "projects",
			Usage: "Index projects",
		},
		cli.BoolTFlag{
			Name:  "users",
			Usage: "Index users",
		},
	}
	app.Action = mcbulkCLI
	app.Run(os.Args)
}
// mcbulkCLI is the cli action: publish flag values to the global config
// store, then run the selected indexing commands.
func mcbulkCLI(c *cli.Context) {
	setupConfig(c)
	runCommands(c)
}
// setupConfig copies the CLI flag values into the global config store so
// the rest of the toolchain reads consistent settings.
func setupConfig(c *cli.Context) {
	config.Set("MC_ES_URL", c.String("es-url"))
	config.Set("MCDB_NAME", c.String("db-name"))
	config.Set("MCDB_CONNECTION", c.String("db-connection"))
}
// runCommands connects to elasticsearch and rethinkdb, then runs each
// indexing stage enabled on the command line. Connection failures are
// fatal (panic here, or inside RSessionMust).
func runCommands(c *cli.Context) {
	esurl := esURL()
	fmt.Println("Elasticsearch URL:", esurl)
	client, err := elastic.NewClient(elastic.SetURL(esurl))
	if err != nil {
		panic("Unable to connect to elasticsearch")
	}
	session := db.RSessionMust()
	if c.Bool("create-index") {
		createIndex(client)
	}
	// BoolT flags default to true, so each stage runs unless disabled.
	if c.BoolT("files") {
		loadFiles(client, session)
	}
	if c.BoolT("users") {
		loadUsers(client, session)
	}
	if c.BoolT("projects") {
		loadProjects(client, session)
	}
	if c.BoolT("samples") {
		loadSamples(client, session)
	}
	if c.BoolT("processes") {
		loadProcesses(client, session)
	}
}
// esURL returns the configured elasticsearch URL, falling back to the
// local default when MC_ES_URL is unset.
func esURL() string {
	configured := config.GetString("MC_ES_URL")
	if configured == "" {
		return "http://localhost:9200"
	}
	return configured
}
// createIndex recreates the "mc" elasticsearch index from the static
// mappings, deleting any existing index of the same name first. Fatal
// problems terminate the process.
func createIndex(client *elastic.Client) {
	fmt.Println("Creating index mc...")
	exists, err := client.IndexExists("mc").Do()
	if err != nil {
		panic(" Failed checking index existence")
	}
	if exists {
		fmt.Println(" Index exists deleting old one")
		// BUG FIX: the delete result used to be discarded, so a failed
		// delete surfaced only as a confusing create failure below.
		if _, err := client.DeleteIndex("mc").Do(); err != nil {
			fmt.Println(" Failed deleting existing index: ", err)
			os.Exit(1)
		}
	}
	createStatus, err := client.CreateIndex("mc").Body(mappings).Do()
	if err != nil {
		fmt.Println(" Failed creating index: ", err)
		os.Exit(1)
	}
	if !createStatus.Acknowledged {
		fmt.Println(" Index create not acknowledged")
	}
	fmt.Println("Done.")
}
// loadFiles bulk-indexes file documents into the "files" type.
// Indexing errors are reported but not fatal.
func loadFiles(client *elastic.Client, session *r.Session) {
	var df doc.File
	filesIndexer := search.NewFilesIndexer(client, session)
	fmt.Println("Indexing files...")
	if err := filesIndexer.Do("files", df); err != nil {
		fmt.Println(" Indexing files failed:", err)
		fmt.Println(" Some files may not have been indexed.")
	}
	fmt.Println("Done.")
}
// loadUsers bulk-indexes user documents into the "users" type.
// Indexing errors are reported but not fatal.
func loadUsers(client *elastic.Client, session *r.Session) {
	var u schema.User
	usersIndexer := search.NewUsersIndexer(client, session)
	fmt.Println("Indexing users...")
	if err := usersIndexer.Do("users", u); err != nil {
		fmt.Println(" Indexing users failed:", err)
		fmt.Println(" Some users may not have been indexed.")
	}
	fmt.Println("Done.")
}
// loadProjects bulk-indexes project documents into the "projects" type.
// Indexing errors are reported but not fatal.
func loadProjects(client *elastic.Client, session *r.Session) {
	var p schema.Project
	projectsIndexer := search.NewProjectsIndexer(client, session)
	fmt.Println("Indexing projects...")
	if err := projectsIndexer.Do("projects", p); err != nil {
		fmt.Println(" Indexing projects failed:", err)
		fmt.Println(" Some projects may not have been indexed.")
	}
	fmt.Println("Done.")
}
// loadSamples bulk-indexes sample documents into the "samples" type.
// Indexing errors are reported but not fatal.
func loadSamples(client *elastic.Client, session *r.Session) {
	var sample doc.Sample
	samplesIndexer := search.NewSamplesIndexer(client, session)
	fmt.Println("Indexing samples...")
	if err := samplesIndexer.Do("samples", sample); err != nil {
		fmt.Println(" Indexing samples failed:", err)
		fmt.Println(" Some samples may not have been indexed.")
	}
	fmt.Println("Done.")
}
// loadProcesses bulk-indexes process documents into the "processes"
// type. Indexing errors are reported but not fatal.
func loadProcesses(client *elastic.Client, session *r.Session) {
	var process doc.Process
	processesIndexer := search.NewProcessesIndexer(client, session)
	fmt.Println("Indexing processes...")
	if err := processesIndexer.Do("processes", process); err != nil {
		fmt.Println(" Indexing processes failed:", err)
		fmt.Println(" Some processes may not have been indexed.")
	}
	fmt.Println("Done.")
}
Formatting: Added a new line.
package main
import (
"fmt"
"os"
"github.com/codegangsta/cli"
r "github.com/dancannon/gorethink"
"github.com/materials-commons/config"
"github.com/materials-commons/mcstore/pkg/db"
"github.com/materials-commons/mcstore/pkg/db/schema"
"github.com/materials-commons/mcstore/server/mcstore/pkg/search"
"github.com/materials-commons/mcstore/server/mcstore/pkg/search/doc"
"gopkg.in/olivere/elastic.v2"
)
// mappings is the static index definition sent to elasticsearch when the
// "mc" index is (re)created. ID-like fields are marked not_analyzed so
// they match verbatim instead of being tokenized.
var mappings string = `
{
    "mappings": {
        "files": {
            "properties": {
                "project_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "project": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "datadir_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "usesid": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "projects": {
            "properties": {
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "datadir": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "samples": {
            "properties":{
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "project_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "sample_id": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "processes": {
            "properties":{
                "process_id": {
                    "type": "string",
                    "index": "not_analyzed"
                },
                "project_id": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        },
        "users": {
            "properties":{
                "id": {
                    "type": "string",
                    "index": "not_analyzed"
                }
            }
        }
    }
}
`
// main wires up the mcbulk CLI: connection flags for elasticsearch and
// rethinkdb plus per-entity-type indexing toggles, dispatching to
// mcbulkCLI. The BoolT flags default to true, so every entity type is
// indexed unless explicitly disabled.
func main() {
	app := cli.NewApp()
	app.Version = "1.0.0"
	app.Authors = []cli.Author{
		{
			Name:  "V. Glenn Tarcea",
			Email: "gtarcea@umich.edu",
		},
	}
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name: "es-url",
			// NOTE(review): this default (port 9500) disagrees with the
			// esURL() fallback of 9200 — confirm which is intended.
			Value:  "http://localhost:9500",
			Usage:  "Elasticsearch server URL",
			EnvVar: "MC_ES_URL",
		},
		cli.StringFlag{
			Name:   "db-connection",
			Value:  "localhost:30815",
			Usage:  "RethinkDB connection string",
			EnvVar: "MCDB_CONNECTION",
		},
		cli.StringFlag{
			Name:   "db-name",
			Value:  "materialscommons",
			Usage:  "Database to index",
			EnvVar: "MCDB_NAME",
		},
		cli.StringFlag{
			Name:   "mc-dir",
			Value:  "/mcfs/data/test",
			Usage:  "Path to data directory",
			EnvVar: "MCDIR",
		},
		cli.BoolFlag{
			Name:  "create-index",
			Usage: "Whether the index should be recreated",
		},
		cli.BoolTFlag{
			Name:  "processes",
			Usage: "Index processes",
		},
		cli.BoolTFlag{
			Name:  "files",
			Usage: "Index files",
		},
		cli.BoolTFlag{
			Name:  "samples",
			Usage: "Index samples",
		},
		cli.BoolTFlag{
			Name:  "projects",
			Usage: "Index projects",
		},
		cli.BoolTFlag{
			Name:  "users",
			Usage: "Index users",
		},
	}
	app.Action = mcbulkCLI
	app.Run(os.Args)
}
// mcbulkCLI is the cli action: it copies the command-line settings into the
// global config, then runs the requested indexing commands.
func mcbulkCLI(c *cli.Context) {
	setupConfig(c)
	runCommands(c)
}
// setupConfig copies the CLI connection flags into the global config so that
// the db and search layers can read them.
func setupConfig(c *cli.Context) {
	config.Set("MC_ES_URL", c.String("es-url"))
	config.Set("MCDB_NAME", c.String("db-name"))
	config.Set("MCDB_CONNECTION", c.String("db-connection"))
}
// runCommands connects to Elasticsearch and RethinkDB, optionally recreates
// the index, then runs each indexing step that is enabled on the command
// line (all steps default to on via BoolT flags).
func runCommands(c *cli.Context) {
	esurl := esURL()
	fmt.Println("Elasticsearch URL:", esurl)
	client, err := elastic.NewClient(elastic.SetURL(esurl))
	if err != nil {
		panic("Unable to connect to elasticsearch")
	}
	session := db.RSessionMust()

	if c.Bool("create-index") {
		createIndex(client)
	}

	// Run the enabled loaders in a fixed order.
	steps := []struct {
		flag string
		run  func(*elastic.Client, *r.Session)
	}{
		{"files", loadFiles},
		{"users", loadUsers},
		{"projects", loadProjects},
		{"samples", loadSamples},
		{"processes", loadProcesses},
	}
	for _, step := range steps {
		if c.BoolT(step.flag) {
			step.run(client, session)
		}
	}
}
// esURL returns the configured Elasticsearch URL, falling back to a local
// default when MC_ES_URL is unset.
// NOTE(review): this fallback port (9200) differs from the es-url flag
// default (9500) set in main — confirm which is intended.
func esURL() string {
	url := config.GetString("MC_ES_URL")
	if url == "" {
		url = "http://localhost:9200"
	}
	return url
}
// createIndex (re)creates the "mc" index with the document mappings,
// deleting any existing index of the same name first. It panics or exits on
// unrecoverable errors, matching the CLI's fail-fast style.
func createIndex(client *elastic.Client) {
	fmt.Println("Creating index mc...")
	exists, err := client.IndexExists("mc").Do()
	if err != nil {
		panic(" Failed checking index existence")
	}
	if exists {
		fmt.Println(" Index exists deleting old one")
		// Bug fix: the delete result was previously ignored; a failed delete
		// would make the CreateIndex call below fail confusingly.
		if _, err := client.DeleteIndex("mc").Do(); err != nil {
			fmt.Println(" Failed deleting existing index: ", err)
			os.Exit(1)
		}
	}
	createStatus, err := client.CreateIndex("mc").Body(mappings).Do()
	if err != nil {
		fmt.Println(" Failed creating index: ", err)
		os.Exit(1)
	}
	if !createStatus.Acknowledged {
		fmt.Println(" Index create not acknowledged")
	}
	fmt.Println("Done.")
}
// loadFiles indexes all file documents into the "files" mapping.
// Failures are reported but not fatal: indexing continues best-effort.
func loadFiles(client *elastic.Client, session *r.Session) {
	var fileDoc doc.File
	indexer := search.NewFilesIndexer(client, session)
	fmt.Println("Indexing files...")
	err := indexer.Do("files", fileDoc)
	if err != nil {
		fmt.Println(" Indexing files failed:", err)
		fmt.Println(" Some files may not have been indexed.")
	}
	fmt.Println("Done.")
}
// loadUsers indexes all user documents into the "users" mapping.
// Failures are reported but not fatal.
func loadUsers(client *elastic.Client, session *r.Session) {
	var userDoc schema.User
	indexer := search.NewUsersIndexer(client, session)
	fmt.Println("Indexing users...")
	err := indexer.Do("users", userDoc)
	if err != nil {
		fmt.Println(" Indexing users failed:", err)
		fmt.Println(" Some users may not have been indexed.")
	}
	fmt.Println("Done.")
}

// loadProjects indexes all project documents into the "projects" mapping.
// Failures are reported but not fatal.
func loadProjects(client *elastic.Client, session *r.Session) {
	var projectDoc schema.Project
	indexer := search.NewProjectsIndexer(client, session)
	fmt.Println("Indexing projects...")
	err := indexer.Do("projects", projectDoc)
	if err != nil {
		fmt.Println(" Indexing projects failed:", err)
		fmt.Println(" Some projects may not have been indexed.")
	}
	fmt.Println("Done.")
}

// loadSamples indexes all sample documents into the "samples" mapping.
// Failures are reported but not fatal.
func loadSamples(client *elastic.Client, session *r.Session) {
	var sampleDoc doc.Sample
	indexer := search.NewSamplesIndexer(client, session)
	fmt.Println("Indexing samples...")
	err := indexer.Do("samples", sampleDoc)
	if err != nil {
		fmt.Println(" Indexing samples failed:", err)
		fmt.Println(" Some samples may not have been indexed.")
	}
	fmt.Println("Done.")
}

// loadProcesses indexes all process documents into the "processes" mapping.
// Failures are reported but not fatal.
func loadProcesses(client *elastic.Client, session *r.Session) {
	var processDoc doc.Process
	indexer := search.NewProcessesIndexer(client, session)
	fmt.Println("Indexing processes...")
	err := indexer.Do("processes", processDoc)
	if err != nil {
		fmt.Println(" Indexing processes failed:", err)
		fmt.Println(" Some processes may not have been indexed.")
	}
	fmt.Println("Done.")
}
|
package ecs
import (
"encoding/json"
"strconv"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/util"
)
// InstanceStatus represents instance status
type InstanceStatus string

// Constants of InstanceStatus
const (
	Creating = InstanceStatus("Creating")
	Running  = InstanceStatus("Running")
	Starting = InstanceStatus("Starting")
	Stopped  = InstanceStatus("Stopped")
	Stopping = InstanceStatus("Stopping")
)

// LockReason identifies why an instance is locked.
type LockReason string

// Known lock reasons.
const (
	LockReasonFinancial = LockReason("financial")
	LockReasonSecurity  = LockReason("security")
)

// LockReasonType wraps a single LockReason entry as returned by the API.
type LockReasonType struct {
	LockReason LockReason
}

// DescribeInstanceStatusArgs holds the request parameters for
// DescribeInstanceStatus. Results may be narrowed by region and zone and
// are paginated via the embedded common.Pagination.
type DescribeInstanceStatusArgs struct {
	RegionId common.Region
	ZoneId   string
	common.Pagination
}

//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancestatusitemtype
type InstanceStatusItemType struct {
	InstanceId string
	Status     InstanceStatus
}

// DescribeInstanceStatusResponse is the paginated API response for
// DescribeInstanceStatus.
type DescribeInstanceStatusResponse struct {
	common.Response
	common.PaginationResult
	InstanceStatuses struct {
		InstanceStatus []InstanceStatusItemType
	}
}
// DescribeInstanceStatus describes instance status
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancestatus
func (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) (instanceStatuses []InstanceStatusItemType, pagination *common.PaginationResult, err error) {
	args.Validate()
	response := DescribeInstanceStatusResponse{}
	if err = client.Invoke("DescribeInstanceStatus", args, &response); err != nil {
		return nil, nil, err
	}
	return response.InstanceStatuses.InstanceStatus, &response.PaginationResult, nil
}
// StopInstanceArgs holds the request parameters for StopInstance.
type StopInstanceArgs struct {
	InstanceId string
	ForceStop  bool
}

// StopInstanceResponse wraps the API response for StopInstance.
type StopInstanceResponse struct {
	common.Response
}

// StopInstance stops instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&stopinstance
func (client *Client) StopInstance(instanceId string, forceStop bool) error {
	request := StopInstanceArgs{
		InstanceId: instanceId,
		ForceStop:  forceStop,
	}
	var response StopInstanceResponse
	return client.Invoke("StopInstance", &request, &response)
}

// StartInstanceArgs holds the request parameters for StartInstance.
type StartInstanceArgs struct {
	InstanceId string
}

// StartInstanceResponse wraps the API response for StartInstance.
type StartInstanceResponse struct {
	common.Response
}

// StartInstance starts instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&startinstance
func (client *Client) StartInstance(instanceId string) error {
	request := StartInstanceArgs{InstanceId: instanceId}
	var response StartInstanceResponse
	return client.Invoke("StartInstance", &request, &response)
}

// RebootInstanceArgs holds the request parameters for RebootInstance.
type RebootInstanceArgs struct {
	InstanceId string
	ForceStop  bool
}

// RebootInstanceResponse wraps the API response for RebootInstance.
type RebootInstanceResponse struct {
	common.Response
}

// RebootInstance reboot instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&rebootinstance
func (client *Client) RebootInstance(instanceId string, forceStop bool) error {
	request := RebootInstanceArgs{
		InstanceId: instanceId,
		ForceStop:  forceStop,
	}
	var response RebootInstanceResponse
	return client.Invoke("RebootInstance", &request, &response)
}
type DescribeInstanceAttributeArgs struct {
InstanceId string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&operationlockstype
type OperationLocksType struct {
LockReason []LockReasonType //enum for financial, security
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&securitygroupidsettype
type SecurityGroupIdSetType struct {
SecurityGroupId string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&ipaddresssettype
type IpAddressSetType struct {
IpAddress []string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&vpcattributestype
type VpcAttributesType struct {
VpcId string
VSwitchId string
PrivateIpAddress IpAddressSetType
NatIpAddress string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&eipaddressassociatetype
type EipAddressAssociateType struct {
AllocationId string
IpAddress string
Bandwidth int
InternetChargeType common.InternetChargeType
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instanceattributestype
type InstanceAttributesType struct {
InstanceId string
InstanceName string
Description string
ImageId string
RegionId common.Region
ZoneId string
CPU int
Memory int
ClusterId string
InstanceType string
InstanceTypeFamily string
HostName string
SerialNumber string
Status InstanceStatus
OperationLocks OperationLocksType
SecurityGroupIds struct {
SecurityGroupId []string
}
PublicIpAddress IpAddressSetType
InnerIpAddress IpAddressSetType
InstanceNetworkType string //enum Classic | Vpc
InternetMaxBandwidthIn int
InternetMaxBandwidthOut int
InternetChargeType common.InternetChargeType
CreationTime util.ISO6801Time //time.Time
VpcAttributes VpcAttributesType
EipAddress EipAddressAssociateType
IoOptimized StringOrBool
InstanceChargeType common.InternetChargeType
ExpiredTime util.ISO6801Time
}
type DescribeInstanceAttributeResponse struct {
common.Response
InstanceAttributesType
}
// DescribeInstanceAttribute describes instance attribute
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstanceattribute
func (client *Client) DescribeInstanceAttribute(instanceId string) (instance *InstanceAttributesType, err error) {
args := DescribeInstanceAttributeArgs{InstanceId: instanceId}
response := DescribeInstanceAttributeResponse{}
err = client.Invoke("DescribeInstanceAttribute", &args, &response)
if err != nil {
return nil, err
}
return &response.InstanceAttributesType, err
}
type ModifyInstanceAttributeArgs struct {
InstanceId string
InstanceName string
Description string
Password string
HostName string
}
type ModifyInstanceAttributeResponse struct {
common.Response
}
//ModifyInstanceAttribute modify instance attrbute
//
// You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/modifyinstanceattribute.html
func (client *Client) ModifyInstanceAttribute(args *ModifyInstanceAttributeArgs) error {
response := ModifyInstanceAttributeResponse{}
err := client.Invoke("ModifyInstanceAttribute", args, &response)
return err
}
// Default timeout value for WaitForInstance method
const InstanceDefaultTimeout = 120

// WaitForInstance polls DescribeInstanceAttribute until the instance reaches
// the requested status or the timeout (in seconds) elapses.
//
// A timeout <= 0 selects InstanceDefaultTimeout. The poll interval is
// DefaultWaitForInterval seconds; note that the elapsed-time accounting only
// counts sleep time, not API-call latency, so the real wall-clock timeout
// can be longer than requested.
func (client *Client) WaitForInstance(instanceId string, status InstanceStatus, timeout int) error {
	if timeout <= 0 {
		timeout = InstanceDefaultTimeout
	}
	for {
		instance, err := client.DescribeInstanceAttribute(instanceId)
		if err != nil {
			return err
		}
		if instance.Status == status {
			//TODO
			//Sleep one more time for timing issues
			time.Sleep(DefaultWaitForInterval * time.Second)
			break
		}
		timeout = timeout - DefaultWaitForInterval
		if timeout <= 0 {
			return common.GetClientErrorFromString("Timeout")
		}
		time.Sleep(DefaultWaitForInterval * time.Second)
	}
	return nil
}
type DescribeInstanceVncUrlArgs struct {
RegionId common.Region
InstanceId string
}
type DescribeInstanceVncUrlResponse struct {
common.Response
VncUrl string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancevncurl
func (client *Client) DescribeInstanceVncUrl(args *DescribeInstanceVncUrlArgs) (string, error) {
response := DescribeInstanceVncUrlResponse{}
err := client.Invoke("DescribeInstanceVncUrl", args, &response)
if err == nil {
return response.VncUrl, nil
}
return "", err
}
type DescribeInstancesArgs struct {
RegionId common.Region
VpcId string
VSwitchId string
ZoneId string
InstanceIds string
InstanceNetworkType string
InstanceName string
PrivateIpAddresses string
InnerIpAddresses string
PublicIpAddresses string
SecurityGroupId string
common.Pagination
}
type DescribeInstancesResponse struct {
common.Response
common.PaginationResult
Instances struct {
Instance []InstanceAttributesType
}
}
// DescribeInstances describes instances
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstances
func (client *Client) DescribeInstances(args *DescribeInstancesArgs) (instances []InstanceAttributesType, pagination *common.PaginationResult, err error) {
args.Validate()
response := DescribeInstancesResponse{}
err = client.Invoke("DescribeInstances", args, &response)
if err == nil {
return response.Instances.Instance, &response.PaginationResult, nil
}
return nil, nil, err
}
type DeleteInstanceArgs struct {
InstanceId string
}
type DeleteInstanceResponse struct {
common.Response
}
// DeleteInstance deletes instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&deleteinstance
func (client *Client) DeleteInstance(instanceId string) error {
args := DeleteInstanceArgs{InstanceId: instanceId}
response := DeleteInstanceResponse{}
err := client.Invoke("DeleteInstance", &args, &response)
return err
}
type DataDiskType struct {
Size int
Category DiskCategory //Enum cloud, ephemeral, ephemeral_ssd
SnapshotId string
DiskName string
Description string
Device string
DeleteWithInstance bool
}
type SystemDiskType struct {
Size int
Category DiskCategory //Enum cloud, ephemeral, ephemeral_ssd
DiskName string
Description string
}
type IoOptimized string
type StringOrBool struct {
Value bool
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (io *StringOrBool) UnmarshalJSON(value []byte) error {
if value[0] == '"' {
var str string
err := json.Unmarshal(value, &str)
if err == nil {
io.Value = (str == "true" || str == "optimized")
}
return err
}
var boolVal bool
err := json.Unmarshal(value, &boolVal)
if err == nil {
io.Value = boolVal
}
return err
}
func (io StringOrBool) Bool() bool {
return io.Value
}
func (io StringOrBool) String() string {
return strconv.FormatBool(io.Value)
}
var (
IoOptimizedNone = IoOptimized("none")
IoOptimizedOptimized = IoOptimized("optimized")
)
type CreateInstanceArgs struct {
RegionId common.Region
ZoneId string
ImageId string
InstanceType string
SecurityGroupId string
InstanceName string
Description string
InternetChargeType common.InternetChargeType
InternetMaxBandwidthIn int
InternetMaxBandwidthOut int
HostName string
Password string
IoOptimized IoOptimized
SystemDisk SystemDiskType
DataDisk []DataDiskType
VSwitchId string
PrivateIpAddress string
ClientToken string
InstanceChargeType common.InstanceChargeType
Period int
}
type CreateInstanceResponse struct {
common.Response
InstanceId string
}
// CreateInstance creates instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance
func (client *Client) CreateInstance(args *CreateInstanceArgs) (instanceId string, err error) {
response := CreateInstanceResponse{}
err = client.Invoke("CreateInstance", args, &response)
if err != nil {
return "", err
}
return response.InstanceId, err
}
type SecurityGroupArgs struct {
InstanceId string
SecurityGroupId string
}
type SecurityGroupResponse struct {
common.Response
}
//JoinSecurityGroup
//
//You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/joinsecuritygroup.html
func (client *Client) JoinSecurityGroup(instanceId string, securityGroupId string) error {
args := SecurityGroupArgs{InstanceId: instanceId, SecurityGroupId: securityGroupId}
response := SecurityGroupResponse{}
err := client.Invoke("JoinSecurityGroup", &args, &response)
return err
}
//LeaveSecurityGroup
//
//You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/leavesecuritygroup.html
func (client *Client) LeaveSecurityGroup(instanceId string, securityGroupId string) error {
args := SecurityGroupArgs{InstanceId: instanceId, SecurityGroupId: securityGroupId}
response := SecurityGroupResponse{}
err := client.Invoke("LeaveSecurityGroup", &args, &response)
return err
}
Add support for searching instances by status.
package ecs
import (
"encoding/json"
"strconv"
"time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/util"
)
// InstanceStatus represents instance status
type InstanceStatus string
// Constants of InstanceStatus
const (
Creating = InstanceStatus("Creating")
Running = InstanceStatus("Running")
Starting = InstanceStatus("Starting")
Stopped = InstanceStatus("Stopped")
Stopping = InstanceStatus("Stopping")
)
type LockReason string
const (
LockReasonFinancial = LockReason("financial")
LockReasonSecurity = LockReason("security")
)
type LockReasonType struct {
LockReason LockReason
}
type DescribeInstanceStatusArgs struct {
RegionId common.Region
ZoneId string
common.Pagination
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancestatusitemtype
type InstanceStatusItemType struct {
InstanceId string
Status InstanceStatus
}
type DescribeInstanceStatusResponse struct {
common.Response
common.PaginationResult
InstanceStatuses struct {
InstanceStatus []InstanceStatusItemType
}
}
// DescribeInstanceStatus describes instance status
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancestatus
func (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) (instanceStatuses []InstanceStatusItemType, pagination *common.PaginationResult, err error) {
args.Validate()
response := DescribeInstanceStatusResponse{}
err = client.Invoke("DescribeInstanceStatus", args, &response)
if err == nil {
return response.InstanceStatuses.InstanceStatus, &response.PaginationResult, nil
}
return nil, nil, err
}
type StopInstanceArgs struct {
InstanceId string
ForceStop bool
}
type StopInstanceResponse struct {
common.Response
}
// StopInstance stops instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&stopinstance
func (client *Client) StopInstance(instanceId string, forceStop bool) error {
args := StopInstanceArgs{
InstanceId: instanceId,
ForceStop: forceStop,
}
response := StopInstanceResponse{}
err := client.Invoke("StopInstance", &args, &response)
return err
}
type StartInstanceArgs struct {
InstanceId string
}
type StartInstanceResponse struct {
common.Response
}
// StartInstance starts instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&startinstance
func (client *Client) StartInstance(instanceId string) error {
args := StartInstanceArgs{InstanceId: instanceId}
response := StartInstanceResponse{}
err := client.Invoke("StartInstance", &args, &response)
return err
}
type RebootInstanceArgs struct {
InstanceId string
ForceStop bool
}
type RebootInstanceResponse struct {
common.Response
}
// RebootInstance reboot instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&rebootinstance
func (client *Client) RebootInstance(instanceId string, forceStop bool) error {
request := RebootInstanceArgs{
InstanceId: instanceId,
ForceStop: forceStop,
}
response := RebootInstanceResponse{}
err := client.Invoke("RebootInstance", &request, &response)
return err
}
type DescribeInstanceAttributeArgs struct {
InstanceId string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&operationlockstype
type OperationLocksType struct {
LockReason []LockReasonType //enum for financial, security
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&securitygroupidsettype
type SecurityGroupIdSetType struct {
SecurityGroupId string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&ipaddresssettype
type IpAddressSetType struct {
IpAddress []string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&vpcattributestype
type VpcAttributesType struct {
VpcId string
VSwitchId string
PrivateIpAddress IpAddressSetType
NatIpAddress string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&eipaddressassociatetype
type EipAddressAssociateType struct {
AllocationId string
IpAddress string
Bandwidth int
InternetChargeType common.InternetChargeType
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instanceattributestype
type InstanceAttributesType struct {
InstanceId string
InstanceName string
Description string
ImageId string
RegionId common.Region
ZoneId string
CPU int
Memory int
ClusterId string
InstanceType string
InstanceTypeFamily string
HostName string
SerialNumber string
Status InstanceStatus
OperationLocks OperationLocksType
SecurityGroupIds struct {
SecurityGroupId []string
}
PublicIpAddress IpAddressSetType
InnerIpAddress IpAddressSetType
InstanceNetworkType string //enum Classic | Vpc
InternetMaxBandwidthIn int
InternetMaxBandwidthOut int
InternetChargeType common.InternetChargeType
CreationTime util.ISO6801Time //time.Time
VpcAttributes VpcAttributesType
EipAddress EipAddressAssociateType
IoOptimized StringOrBool
InstanceChargeType common.InternetChargeType
ExpiredTime util.ISO6801Time
}
type DescribeInstanceAttributeResponse struct {
common.Response
InstanceAttributesType
}
// DescribeInstanceAttribute describes instance attribute
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstanceattribute
func (client *Client) DescribeInstanceAttribute(instanceId string) (instance *InstanceAttributesType, err error) {
args := DescribeInstanceAttributeArgs{InstanceId: instanceId}
response := DescribeInstanceAttributeResponse{}
err = client.Invoke("DescribeInstanceAttribute", &args, &response)
if err != nil {
return nil, err
}
return &response.InstanceAttributesType, err
}
type ModifyInstanceAttributeArgs struct {
InstanceId string
InstanceName string
Description string
Password string
HostName string
}
type ModifyInstanceAttributeResponse struct {
common.Response
}
//ModifyInstanceAttribute modify instance attrbute
//
// You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/modifyinstanceattribute.html
func (client *Client) ModifyInstanceAttribute(args *ModifyInstanceAttributeArgs) error {
response := ModifyInstanceAttributeResponse{}
err := client.Invoke("ModifyInstanceAttribute", args, &response)
return err
}
// Default timeout value for WaitForInstance method
const InstanceDefaultTimeout = 120
// WaitForInstance waits for instance to given status
func (client *Client) WaitForInstance(instanceId string, status InstanceStatus, timeout int) error {
if timeout <= 0 {
timeout = InstanceDefaultTimeout
}
for {
instance, err := client.DescribeInstanceAttribute(instanceId)
if err != nil {
return err
}
if instance.Status == status {
//TODO
//Sleep one more time for timing issues
time.Sleep(DefaultWaitForInterval * time.Second)
break
}
timeout = timeout - DefaultWaitForInterval
if timeout <= 0 {
return common.GetClientErrorFromString("Timeout")
}
time.Sleep(DefaultWaitForInterval * time.Second)
}
return nil
}
type DescribeInstanceVncUrlArgs struct {
RegionId common.Region
InstanceId string
}
type DescribeInstanceVncUrlResponse struct {
common.Response
VncUrl string
}
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancevncurl
func (client *Client) DescribeInstanceVncUrl(args *DescribeInstanceVncUrlArgs) (string, error) {
response := DescribeInstanceVncUrlResponse{}
err := client.Invoke("DescribeInstanceVncUrl", args, &response)
if err == nil {
return response.VncUrl, nil
}
return "", err
}
type DescribeInstancesArgs struct {
RegionId common.Region
VpcId string
VSwitchId string
ZoneId string
InstanceIds string
InstanceNetworkType string
InstanceName string
Status InstanceStatus
PrivateIpAddresses string
InnerIpAddresses string
PublicIpAddresses string
SecurityGroupId string
common.Pagination
}
type DescribeInstancesResponse struct {
common.Response
common.PaginationResult
Instances struct {
Instance []InstanceAttributesType
}
}
// DescribeInstances describes instances
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstances
func (client *Client) DescribeInstances(args *DescribeInstancesArgs) (instances []InstanceAttributesType, pagination *common.PaginationResult, err error) {
args.Validate()
response := DescribeInstancesResponse{}
err = client.Invoke("DescribeInstances", args, &response)
if err == nil {
return response.Instances.Instance, &response.PaginationResult, nil
}
return nil, nil, err
}
type DeleteInstanceArgs struct {
InstanceId string
}
type DeleteInstanceResponse struct {
common.Response
}
// DeleteInstance deletes instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&deleteinstance
func (client *Client) DeleteInstance(instanceId string) error {
args := DeleteInstanceArgs{InstanceId: instanceId}
response := DeleteInstanceResponse{}
err := client.Invoke("DeleteInstance", &args, &response)
return err
}
type DataDiskType struct {
Size int
Category DiskCategory //Enum cloud, ephemeral, ephemeral_ssd
SnapshotId string
DiskName string
Description string
Device string
DeleteWithInstance bool
}
type SystemDiskType struct {
Size int
Category DiskCategory //Enum cloud, ephemeral, ephemeral_ssd
DiskName string
Description string
}
type IoOptimized string
type StringOrBool struct {
Value bool
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (io *StringOrBool) UnmarshalJSON(value []byte) error {
if value[0] == '"' {
var str string
err := json.Unmarshal(value, &str)
if err == nil {
io.Value = (str == "true" || str == "optimized")
}
return err
}
var boolVal bool
err := json.Unmarshal(value, &boolVal)
if err == nil {
io.Value = boolVal
}
return err
}
func (io StringOrBool) Bool() bool {
return io.Value
}
func (io StringOrBool) String() string {
return strconv.FormatBool(io.Value)
}
var (
IoOptimizedNone = IoOptimized("none")
IoOptimizedOptimized = IoOptimized("optimized")
)
type CreateInstanceArgs struct {
RegionId common.Region
ZoneId string
ImageId string
InstanceType string
SecurityGroupId string
InstanceName string
Description string
InternetChargeType common.InternetChargeType
InternetMaxBandwidthIn int
InternetMaxBandwidthOut int
HostName string
Password string
IoOptimized IoOptimized
SystemDisk SystemDiskType
DataDisk []DataDiskType
VSwitchId string
PrivateIpAddress string
ClientToken string
InstanceChargeType common.InstanceChargeType
Period int
}
type CreateInstanceResponse struct {
common.Response
InstanceId string
}
// CreateInstance creates instance
//
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance
func (client *Client) CreateInstance(args *CreateInstanceArgs) (instanceId string, err error) {
response := CreateInstanceResponse{}
err = client.Invoke("CreateInstance", args, &response)
if err != nil {
return "", err
}
return response.InstanceId, err
}
type SecurityGroupArgs struct {
InstanceId string
SecurityGroupId string
}
type SecurityGroupResponse struct {
common.Response
}
//JoinSecurityGroup
//
//You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/joinsecuritygroup.html
func (client *Client) JoinSecurityGroup(instanceId string, securityGroupId string) error {
args := SecurityGroupArgs{InstanceId: instanceId, SecurityGroupId: securityGroupId}
response := SecurityGroupResponse{}
err := client.Invoke("JoinSecurityGroup", &args, &response)
return err
}
//LeaveSecurityGroup
//
//You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/leavesecuritygroup.html
func (client *Client) LeaveSecurityGroup(instanceId string, securityGroupId string) error {
args := SecurityGroupArgs{InstanceId: instanceId, SecurityGroupId: securityGroupId}
response := SecurityGroupResponse{}
err := client.Invoke("LeaveSecurityGroup", &args, &response)
return err
}
|
package revel
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"code.google.com/p/go.net/websocket"
)
type TestSuite struct {
Client *http.Client
Response *http.Response
ResponseBody []byte
}
var TestSuites []interface{} // Array of structs that embed TestSuite
// NewTestSuite returns an initialized TestSuite ready for use. It is invoked
// by the test harness to initialize the embedded field in application tests.
func NewTestSuite() TestSuite {
return TestSuite{Client: &http.Client{}}
}
// Return the address and port of the server, e.g. "127.0.0.1:8557"
func (t *TestSuite) AddrAndPort() string {
if Server.Addr[0] == ':' {
return "127.0.0.1" + Server.Addr
}
return Server.Addr
}
// Return the base http URL of the server, e.g. "http://127.0.0.1:8557"
func (t *TestSuite) BaseUrl() string {
return "http://" + t.AddrAndPort()
}
// Return the base websocket URL of the server, e.g. "ws://127.0.0.1:8557"
func (t *TestSuite) WebSocketUrl() string {
return "ws://" + t.AddrAndPort()
}
// Issue a GET request to the given path and store the result in Request and
// RequestBody.
func (t *TestSuite) Get(path string) {
req, err := http.NewRequest("GET", t.BaseUrl()+path, nil)
if err != nil {
panic(err)
}
t.MakeRequest(req)
}
// Issue a POST request to the given path, sending the given Content-Type and
// data, and store the result in Request and RequestBody. "data" may be nil.
func (t *TestSuite) Post(path string, contentType string, reader io.Reader) {
req, err := http.NewRequest("POST", t.BaseUrl()+path, reader)
if err != nil {
panic(err)
}
req.Header.Set("Content-Type", contentType)
t.MakeRequest(req)
}
// Issue a POST request to the given path as a form post of the given key and
// values, and store the result in Request and RequestBody.
func (t *TestSuite) PostForm(path string, data url.Values) {
t.Post(path, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// MakeRequest issues req, stores the response in t.Response, and reads the
// entire body into t.ResponseBody. It panics on any transport or read
// error, matching the rest of the test helpers.
func (t *TestSuite) MakeRequest(req *http.Request) {
	var err error
	if t.Response, err = t.Client.Do(req); err != nil {
		panic(err)
	}
	// Bug fix: the response body was never closed, leaking the underlying
	// connection on every request.
	defer t.Response.Body.Close()
	if t.ResponseBody, err = ioutil.ReadAll(t.Response.Body); err != nil {
		panic(err)
	}
}
// WebSocket dials a websocket connection to the given path on the test
// server and returns it, panicking on failure.
func (t *TestSuite) WebSocket(path string) *websocket.Conn {
	// Renamed the local from "url" to avoid shadowing the net/url package.
	target := t.WebSocketUrl() + path
	origin := t.BaseUrl() + "/"
	conn, err := websocket.Dial(target, "", origin)
	if err != nil {
		panic(err)
	}
	return conn
}
func (t *TestSuite) AssertOk() {
t.AssertStatus(http.StatusOK)
}
func (t *TestSuite) AssertNotFound() {
t.AssertStatus(http.StatusNotFound)
}
func (t *TestSuite) AssertStatus(status int) {
if t.Response.StatusCode != status {
panic(fmt.Errorf("Status: (expected) %d != %d (actual)", status, t.Response.StatusCode))
}
}
func (t *TestSuite) AssertContentType(contentType string) {
t.AssertHeader("Content-Type", contentType)
}
func (t *TestSuite) AssertHeader(name, value string) {
actual := t.Response.Header.Get(name)
if actual != value {
panic(fmt.Errorf("Header %s: (expected) %s != %s (actual)", name, value, actual))
}
}
func (t *TestSuite) AssertEqual(expected, actual interface{}) {
if !Equal(expected, actual) {
panic(fmt.Errorf("(expected) %v != %v (actual)", expected, actual))
}
}
// Assert panics with a generic message if exp is false.
func (t *TestSuite) Assert(exp bool) {
	t.Assertf(exp, "Assertion failed")
}

// Assertf panics with the formatted message if exp is false.
func (t *TestSuite) Assertf(exp bool, formatStr string, args ...interface{}) {
	if !exp {
		// Bug fix: args must be expanded with "..." — passing the slice as a
		// single operand formatted it as "[v1 v2]" instead of applying each
		// argument to its verb.
		panic(fmt.Errorf(formatStr, args...))
	}
}
Rename the AddrAndPort method to Host; fix a missing newline.
package revel
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"code.google.com/p/go.net/websocket"
)
type TestSuite struct {
Client *http.Client
Response *http.Response
ResponseBody []byte
}
var TestSuites []interface{} // Array of structs that embed TestSuite
// NewTestSuite returns an initialized TestSuite ready for use. It is invoked
// by the test harness to initialize the embedded field in application tests.
func NewTestSuite() TestSuite {
return TestSuite{Client: &http.Client{}}
}
// Return the address and port of the server, e.g. "127.0.0.1:8557"
func (t *TestSuite) Host() string {
if Server.Addr[0] == ':' {
return "127.0.0.1" + Server.Addr
}
return Server.Addr
}
// Return the base http URL of the server, e.g. "http://127.0.0.1:8557"
func (t *TestSuite) BaseUrl() string {
return "http://" + t.Host()
}
// Return the base websocket URL of the server, e.g. "ws://127.0.0.1:8557"
func (t *TestSuite) WebSocketUrl() string {
return "ws://" + t.Host()
}
// Issue a GET request to the given path and store the result in Request and
// RequestBody.
func (t *TestSuite) Get(path string) {
req, err := http.NewRequest("GET", t.BaseUrl()+path, nil)
if err != nil {
panic(err)
}
t.MakeRequest(req)
}
// Issue a POST request to the given path, sending the given Content-Type and
// data, and store the result in Response and ResponseBody. "reader" may be nil.
func (t *TestSuite) Post(path string, contentType string, reader io.Reader) {
	req, err := http.NewRequest("POST", t.BaseUrl()+path, reader)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", contentType)
	t.MakeRequest(req)
}

// Issue a POST request to the given path as a form post of the given key and
// values, and store the result in Response and ResponseBody.
func (t *TestSuite) PostForm(path string, data url.Values) {
	t.Post(path, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// MakeRequest issues any request and reads the response. If successful, the
// caller may examine the Response and ResponseBody properties. Panics on a
// transport or read error.
func (t *TestSuite) MakeRequest(req *http.Request) {
	var err error
	if t.Response, err = t.Client.Do(req); err != nil {
		panic(err)
	}
	// Close the body once it has been fully read so the underlying
	// connection can be reused by the http.Client (it was leaked before).
	defer t.Response.Body.Close()
	if t.ResponseBody, err = ioutil.ReadAll(t.Response.Body); err != nil {
		panic(err)
	}
}
// WebSocket creates a websocket connection to the given path and returns the
// connection, panicking if the dial fails.
func (t *TestSuite) WebSocket(path string) *websocket.Conn {
	origin := t.BaseUrl() + "/"
	// Named wsURL (not url) to avoid shadowing the imported net/url package
	// inside this function.
	wsURL := t.WebSocketUrl() + path
	ws, err := websocket.Dial(wsURL, "", origin)
	if err != nil {
		panic(err)
	}
	return ws
}
// AssertOk asserts the last response status was 200 OK.
func (t *TestSuite) AssertOk() {
	t.AssertStatus(http.StatusOK)
}

// AssertNotFound asserts the last response status was 404 Not Found.
func (t *TestSuite) AssertNotFound() {
	t.AssertStatus(http.StatusNotFound)
}

// AssertStatus panics unless the last response carried the given status code.
func (t *TestSuite) AssertStatus(status int) {
	if t.Response.StatusCode != status {
		panic(fmt.Errorf("Status: (expected) %d != %d (actual)", status, t.Response.StatusCode))
	}
}

// AssertContentType asserts the Content-Type header of the last response.
func (t *TestSuite) AssertContentType(contentType string) {
	t.AssertHeader("Content-Type", contentType)
}

// AssertHeader panics unless the named header of the last response equals
// the given value exactly.
func (t *TestSuite) AssertHeader(name, value string) {
	actual := t.Response.Header.Get(name)
	if actual != value {
		panic(fmt.Errorf("Header %s: (expected) %s != %s (actual)", name, value, actual))
	}
}
// AssertEqual panics unless expected and actual compare equal according to
// the package-level Equal helper.
func (t *TestSuite) AssertEqual(expected, actual interface{}) {
	if !Equal(expected, actual) {
		panic(fmt.Errorf("(expected) %v != %v (actual)", expected, actual))
	}
}

// Assert panics (failing the test) if exp is false.
func (t *TestSuite) Assert(exp bool) {
	t.Assertf(exp, "Assertion failed")
}
// Assertf panics (failing the test) with the formatted message if exp is
// false. args are the format operands for formatStr.
func (t *TestSuite) Assertf(exp bool, formatStr string, args ...interface{}) {
	if !exp {
		// args must be expanded with ...; passing the slice directly would
		// format it as a single []interface{} operand (go vet flags this).
		panic(fmt.Errorf(formatStr, args...))
	}
}
|
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package gce
import (
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/juju/errors"
"github.com/juju/juju/instance"
"github.com/juju/juju/network"
)
// environInstance adapts a raw GCE compute instance to juju's
// instance.Instance interface.
type environInstance struct {
	id   instance.Id
	env  *environ
	zone string
	// rootDiskMB caches the root disk size in MiB, derived in update().
	rootDiskMB uint64
	// TODO(ericsnow) rename this to "raw"?
	gce *compute.Instance
}

// Compile-time check that environInstance implements instance.Instance.
var _ instance.Instance = (*environInstance)(nil)

// newInstance wraps the raw compute instance for the given environ and
// populates the derived fields (zone, cached raw copy, root disk size).
func newInstance(raw *compute.Instance, env *environ) *environInstance {
	inst := environInstance{
		id:   instance.Id(raw.Name),
		env:  env,
		zone: zoneName(raw),
	}
	inst.update(raw)
	return &inst
}

// Id returns the provider-specific instance ID.
func (inst *environInstance) Id() instance.Id {
	return inst.id
}

// Status reports the raw GCE status string of the instance.
func (inst *environInstance) Status() string {
	return inst.gce.Status
}

// update replaces the cached raw instance and refreshes the cached root
// disk size when it can be determined.
func (inst *environInstance) update(raw *compute.Instance) {
	inst.gce = raw
	attached := rootDisk(raw)
	if diskSize, ok := inst.diskSize(attached); ok {
		inst.rootDiskMB = diskSize
	}
}
// diskSize returns the size of the attached root disk in MiB. If the size
// cannot be read from the attached-disk metadata it falls back to querying
// the disk resource directly; ok is false when neither source yields a size.
// NOTE(review): assumes attached is non-nil — confirm rootDisk never
// returns nil for instances reaching this path.
func (inst *environInstance) diskSize(attached *compute.AttachedDisk) (uint64, bool) {
	diskSizeGB, err := diskSizeGB(attached)
	if err != nil {
		// Recoverable: we fall back to the disks API below, so log at
		// warning level rather than error.
		logger.Warningf("error while getting root disk size: %v", err)
		disk, err := inst.env.gce.disk(attached.Source)
		if err != nil {
			logger.Warningf("error while getting root disk: %v", err)
			// Leave it what it was.
			return 0, false
		}
		diskSizeGB = disk.SizeGb
	}
	return uint64(diskSizeGB) * 1024, true
}
// Refresh re-fetches the raw instance from GCE and updates the cached copy.
func (inst *environInstance) Refresh() error {
	env := inst.env.getSnapshot()
	raw, err := env.gce.instance(inst.zone, string(inst.id))
	if err != nil {
		return errors.Trace(err)
	}
	// TODO(ericsnow) Drop the hack of carrying over InitializeParams?
	// The freshly-fetched instance may lack InitializeParams, so carry them
	// over from the previously cached copy when absent.
	// NOTE(review): assumes rootDisk never returns nil here — confirm.
	if rootDisk(raw).InitializeParams == nil {
		rootDisk(raw).InitializeParams = rootDisk(inst.gce).InitializeParams
	}
	inst.update(raw)
	return nil
}
// Addresses returns the public (NAT) and cloud-local addresses attached to
// the instance's network interfaces, in interface order.
func (inst *environInstance) Addresses() ([]network.Address, error) {
	var results []network.Address
	for _, iface := range inst.gce.NetworkInterfaces {
		// Public (NAT) addresses for this interface come first.
		for _, ac := range iface.AccessConfigs {
			if ac.NatIP != "" {
				results = append(results, network.Address{
					Value: ac.NatIP,
					Type:  network.IPv4Address,
					Scope: network.ScopePublic,
				})
			}
		}
		// Then the interface's private address, when present.
		if iface.NetworkIP != "" {
			results = append(results, network.Address{
				Value: iface.NetworkIP,
				Type:  network.IPv4Address,
				Scope: network.ScopeCloudLocal,
			})
		}
	}
	return results, nil
}
// findInst returns the element of instances whose Id matches id, or nil when
// no such instance is present.
func findInst(id instance.Id, instances []instance.Instance) instance.Instance {
	for i := range instances {
		if instances[i].Id() == id {
			return instances[i]
		}
	}
	return nil
}
// firewall stuff
// These methods delegate to the environ, which manages firewall rules for
// the whole environment.

// OpenPorts opens the given ports on the instance, which
// should have been started with the given machine id.
func (inst *environInstance) OpenPorts(machineId string, ports []network.PortRange) error {
	env := inst.env.getSnapshot()
	err := env.openPorts(machineId, ports)
	return errors.Trace(err)
}

// ClosePorts closes the given ports on the instance, which
// should have been started with the given machine id.
func (inst *environInstance) ClosePorts(machineId string, ports []network.PortRange) error {
	env := inst.env.getSnapshot()
	err := env.closePorts(machineId, ports)
	return errors.Trace(err)
}

// Ports returns the set of ports open on the instance, which
// should have been started with the given machine id.
// The ports are returned as sorted by SortPorts.
func (inst *environInstance) Ports(machineId string) ([]network.PortRange, error) {
	env := inst.env.getSnapshot()
	ports, err := env.ports(machineId)
	return ports, errors.Trace(err)
}
provider/gce: Log warnings instead of errors.
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package gce
import (
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/juju/errors"
"github.com/juju/juju/instance"
"github.com/juju/juju/network"
)
// environInstance adapts a raw GCE compute instance to juju's
// instance.Instance interface.
type environInstance struct {
	id   instance.Id
	env  *environ
	zone string
	// rootDiskMB caches the root disk size in MiB, derived in update().
	rootDiskMB uint64
	// TODO(ericsnow) rename this to "raw"?
	gce *compute.Instance
}

// Compile-time check that environInstance implements instance.Instance.
var _ instance.Instance = (*environInstance)(nil)

// newInstance wraps the raw compute instance for the given environ and
// populates the derived fields (zone, cached raw copy, root disk size).
func newInstance(raw *compute.Instance, env *environ) *environInstance {
	inst := environInstance{
		id:   instance.Id(raw.Name),
		env:  env,
		zone: zoneName(raw),
	}
	inst.update(raw)
	return &inst
}

// Id returns the provider-specific instance ID.
func (inst *environInstance) Id() instance.Id {
	return inst.id
}

// Status reports the raw GCE status string of the instance.
func (inst *environInstance) Status() string {
	return inst.gce.Status
}

// update replaces the cached raw instance and refreshes the cached root
// disk size when it can be determined.
func (inst *environInstance) update(raw *compute.Instance) {
	inst.gce = raw
	attached := rootDisk(raw)
	if diskSize, ok := inst.diskSize(attached); ok {
		inst.rootDiskMB = diskSize
	}
}
// diskSize returns the size of the attached root disk in MiB. If the size
// cannot be read from the attached-disk metadata it falls back to querying
// the disk resource directly; ok is false when neither source yields a size.
// NOTE(review): assumes attached is non-nil — confirm rootDisk never
// returns nil for instances reaching this path.
func (inst *environInstance) diskSize(attached *compute.AttachedDisk) (uint64, bool) {
	diskSizeGB, err := diskSizeGB(attached)
	if err != nil {
		// Recoverable: we fall back to the disks API below, hence a warning.
		logger.Warningf("error while getting root disk size: %v", err)
		disk, err := inst.env.gce.disk(attached.Source)
		if err != nil {
			logger.Warningf("error while getting root disk: %v", err)
			// Leave it what it was.
			return 0, false
		}
		diskSizeGB = disk.SizeGb
	}
	return uint64(diskSizeGB) * 1024, true
}
// Refresh re-fetches the raw instance from GCE and updates the cached copy.
func (inst *environInstance) Refresh() error {
	env := inst.env.getSnapshot()
	raw, err := env.gce.instance(inst.zone, string(inst.id))
	if err != nil {
		return errors.Trace(err)
	}
	// TODO(ericsnow) Drop the hack of carrying over InitializeParams?
	// The freshly-fetched instance may lack InitializeParams, so carry them
	// over from the previously cached copy when absent.
	// NOTE(review): assumes rootDisk never returns nil here — confirm.
	if rootDisk(raw).InitializeParams == nil {
		rootDisk(raw).InitializeParams = rootDisk(inst.gce).InitializeParams
	}
	inst.update(raw)
	return nil
}
// Addresses returns the public (NAT) and cloud-local addresses attached to
// the instance's network interfaces, in interface order.
func (inst *environInstance) Addresses() ([]network.Address, error) {
	var addresses []network.Address
	for _, netif := range inst.gce.NetworkInterfaces {
		// Add public addresses.
		for _, accessConfig := range netif.AccessConfigs {
			if accessConfig.NatIP == "" {
				continue
			}
			address := network.Address{
				Value: accessConfig.NatIP,
				Type:  network.IPv4Address,
				Scope: network.ScopePublic,
			}
			addresses = append(addresses, address)
		}
		// Add private address.
		if netif.NetworkIP == "" {
			continue
		}
		address := network.Address{
			Value: netif.NetworkIP,
			Type:  network.IPv4Address,
			Scope: network.ScopeCloudLocal,
		}
		addresses = append(addresses, address)
	}
	return addresses, nil
}

// findInst returns the element of instances whose Id matches id, or nil when
// no such instance is present.
func findInst(id instance.Id, instances []instance.Instance) instance.Instance {
	for _, inst := range instances {
		if id == inst.Id() {
			return inst
		}
	}
	return nil
}
// firewall stuff
// These methods delegate to the environ, which manages firewall rules for
// the whole environment.

// OpenPorts opens the given ports on the instance, which
// should have been started with the given machine id.
func (inst *environInstance) OpenPorts(machineId string, ports []network.PortRange) error {
	env := inst.env.getSnapshot()
	err := env.openPorts(machineId, ports)
	return errors.Trace(err)
}

// ClosePorts closes the given ports on the instance, which
// should have been started with the given machine id.
func (inst *environInstance) ClosePorts(machineId string, ports []network.PortRange) error {
	env := inst.env.getSnapshot()
	err := env.closePorts(machineId, ports)
	return errors.Trace(err)
}

// Ports returns the set of ports open on the instance, which
// should have been started with the given machine id.
// The ports are returned as sorted by SortPorts.
func (inst *environInstance) Ports(machineId string) ([]network.PortRange, error) {
	env := inst.env.getSnapshot()
	ports, err := env.ports(machineId)
	return ports, errors.Trace(err)
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package crier reports finished prowjob status to git providers.
package crier
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/sirupsen/logrus"
v1 "k8s.io/test-infra/prow/apis/prowjobs/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
clientset "k8s.io/test-infra/prow/client/clientset/versioned"
pjinformers "k8s.io/test-infra/prow/client/informers/externalversions/prowjobs/v1"
)
// reportClient is the reporter-side interface crier drives: ShouldReport
// decides whether a prowjob concerns this reporter, Report performs the
// report, and GetName identifies the reporter for bookkeeping in
// Status.PrevReportStates.
type reportClient interface {
	Report(pj *v1.ProwJob) ([]*v1.ProwJob, error)
	GetName() string
	ShouldReport(pj *v1.ProwJob) bool
}

// Controller struct defines how a controller should encapsulate
// logging, client connectivity, informing (list and watching)
// queueing, and handling of resource changes
type Controller struct {
	pjclientset clientset.Interface             // writes status patches back to the API server
	queue       workqueue.RateLimitingInterface // work queue of namespace/name keys
	informer    pjinformers.ProwJobInformer     // watches and caches ProwJobs
	reporter    reportClient                    // destination-specific reporter
	numWorkers  int                             // number of concurrent queue workers
	wg          *sync.WaitGroup                 // tracks running workers for shutdown
}

// NewController constructs a new instance of the crier controller.
func NewController(
	pjclientset clientset.Interface,
	queue workqueue.RateLimitingInterface,
	informer pjinformers.ProwJobInformer,
	reporter reportClient,
	numWorkers int) *Controller {
	return &Controller{
		pjclientset: pjclientset,
		queue:       queue,
		informer:    informer,
		reporter:    reporter,
		numWorkers:  numWorkers,
		wg:          &sync.WaitGroup{},
	}
}
// Run is the main path of execution for the controller loop.
// It registers event handlers on the informer, waits for the cache to sync,
// starts numWorkers queue workers, and blocks until ctx is cancelled, at
// which point it shuts the queue down and waits for workers to drain.
func (c *Controller) Run(ctx context.Context) {
	// handle a panic with logging and exiting
	defer utilruntime.HandleCrash()
	logrus.Info("Initiating controller")
	// Enqueue the namespace/name key for every add and update event.
	c.informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			logrus.WithField("prowjob", key).Debug("Add prowjob")
			if err != nil {
				logrus.WithError(err).Error("Cannot get key from object meta")
				return
			}
			c.queue.AddRateLimited(key)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(newObj)
			logrus.WithField("prowjob", key).Debug("Update prowjob")
			if err != nil {
				logrus.WithError(err).Error("Cannot get key from object meta")
				return
			}
			c.queue.AddRateLimited(key)
		},
	})
	// run the informer to start listing and watching resources
	go c.informer.Informer().Run(ctx.Done())
	// do the initial synchronization (one time) to populate resources
	if !cache.WaitForCacheSync(ctx.Done(), c.HasSynced) {
		utilruntime.HandleError(fmt.Errorf("Error syncing cache"))
		return
	}
	logrus.Info("Controller.Run: cache sync complete")
	// run the runWorker method every second with a stop channel
	for i := 0; i < c.numWorkers; i++ {
		go wait.Until(c.runWorker, time.Second, ctx.Done())
	}
	logrus.Infof("Started %d workers", c.numWorkers)
	<-ctx.Done()
	logrus.Info("Shutting down workers")
	// ignore new items in the queue but when all goroutines
	// have completed existing items then shutdown
	c.queue.ShutDown()
	c.wg.Wait()
}

// HasSynced allows us to satisfy the Controller interface by wiring up the informer's HasSynced
// method to it.
func (c *Controller) HasSynced() bool {
	return c.informer.Informer().HasSynced()
}
// runWorker executes the loop to process new items added to the queue.
// NOTE(review): wg.Add(1) runs inside the worker goroutine itself; if the
// context is cancelled before a worker is scheduled, Run's wg.Wait() can
// return before that worker registers — confirm whether this race matters.
func (c *Controller) runWorker() {
	c.wg.Add(1)
	for c.processNextItem() {
	}
	c.wg.Done()
}

// retry re-queues key for another attempt unless it has already been
// requeued five times, in which case it is dropped. It always returns true
// so the worker loop keeps running.
func (c *Controller) retry(key interface{}, err error) bool {
	keyRaw := key.(string)
	if c.queue.NumRequeues(key) < 5 {
		logrus.WithError(err).WithField("prowjob", keyRaw).Info("Failed processing item, retrying")
		c.queue.AddRateLimited(key)
	} else {
		logrus.WithError(err).WithField("prowjob", keyRaw).Error("Failed processing item, no more retries")
		c.queue.Forget(key)
	}
	return true
}
// updateReportState records, via a JSON merge patch, that this reporter has
// reported the prowjob's current state (in Status.PrevReportStates), then
// blocks until the update is visible in the local lister.
func (c *Controller) updateReportState(pj *v1.ProwJob) error {
	pjData, err := json.Marshal(pj)
	if err != nil {
		return fmt.Errorf("error marshal pj: %v", err)
	}
	// update pj report status
	newpj := pj.DeepCopy()
	// we set omitempty on PrevReportStates, so here we need to init it if is nil
	if newpj.Status.PrevReportStates == nil {
		newpj.Status.PrevReportStates = map[string]v1.ProwJobState{}
	}
	newpj.Status.PrevReportStates[c.reporter.GetName()] = newpj.Status.State
	newpjData, err := json.Marshal(newpj)
	if err != nil {
		return fmt.Errorf("error marshal new pj: %v", err)
	}
	patch, err := jsonpatch.CreateMergePatch(pjData, newpjData)
	if err != nil {
		return fmt.Errorf("error CreateMergePatch: %v", err)
	}
	if len(patch) == 0 {
		logrus.Warnf("Empty merge patch: pjData: %s, newpjData: %s", string(pjData), string(newpjData))
	}
	logrus.Infof("Created merge patch: %v", string(patch))
	_, err = c.pjclientset.ProwV1().ProwJobs(pj.Namespace).Patch(pj.Name, types.MergePatchType, patch)
	if err != nil {
		return err
	}
	// Block until the update is in the lister to make sure that events from another controller
	// that also does reporting dont trigger another report because our lister doesn't yet contain
	// the updated Status
	if err := wait.Poll(time.Second, 3*time.Second, func() (bool, error) {
		listed, err := c.informer.Lister().ProwJobs(newpj.Namespace).Get(newpj.Name)
		if err != nil {
			return false, err
		}
		// BUGFIX: compare the LISTED object's recorded state against the one
		// we just wrote. The previous code compared newpj against itself,
		// which is trivially true, so the poll succeeded as soon as the
		// listed prowjob had any non-nil PrevReportStates.
		if listed.Status.PrevReportStates != nil &&
			listed.Status.PrevReportStates[c.reporter.GetName()] == newpj.Status.State {
			return true, nil
		}
		return false, nil
	}); err != nil {
		return fmt.Errorf("failed to wait for updated report status to be in lister: %v", err)
	}
	return nil
}
// processNextItem retrieves each queued item and takes the necessary handler action based off of if
// the item was created or deleted. It returns false only when the queue has
// been shut down, which terminates the worker loop.
func (c *Controller) processNextItem() bool {
	key, quit := c.queue.Get()
	if quit {
		logrus.Debug("Queue already shut down, exiting processNextItem")
		return false
	}
	defer c.queue.Done(key)
	// assert the string out of the key (format `namespace/name`)
	keyRaw := key.(string)
	logrus.WithField("key", keyRaw).Debug("processing next key")
	namespace, name, err := cache.SplitMetaNamespaceKey(keyRaw)
	if err != nil {
		logrus.WithError(err).WithField("prowjob", keyRaw).Error("invalid resource key")
		c.queue.Forget(key)
		return true
	}
	// take the string key and get the object out of the indexer
	//
	// if there is an error in getting the key from the index
	// then we want to retry this particular queue key a certain
	// number of times (5 here) before we forget the queue key
	// and throw an error
	readOnlyPJ, err := c.informer.Lister().ProwJobs(namespace).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			logrus.WithField("prowjob", keyRaw).Info("object no longer exist")
			c.queue.Forget(key)
			return true
		}
		return c.retry(key, err)
	}
	// BUGFIX: the lister returns a pointer into its shared cache; mutating it
	// (e.g. initializing PrevReportStates below) would corrupt the cached
	// object for every other consumer. Operate on a deep copy instead.
	pj := readOnlyPJ.DeepCopy()
	// not belong to the current reporter
	if !pj.Spec.Report || !c.reporter.ShouldReport(pj) {
		c.queue.Forget(key)
		return true
	}
	// we set omitempty on PrevReportStates, so here we need to init it if is nil
	if pj.Status.PrevReportStates == nil {
		pj.Status.PrevReportStates = map[string]v1.ProwJobState{}
	}
	// already reported current state
	if pj.Status.PrevReportStates[c.reporter.GetName()] == pj.Status.State {
		logrus.WithField("prowjob", keyRaw).Info("Already reported")
		c.queue.Forget(key)
		return true
	}
	logrus.WithField("prowjob", keyRaw).Infof("Will report state : %s", pj.Status.State)
	pjs, err := c.reporter.Report(pj)
	if err != nil {
		fields := logrus.Fields{
			"prowjob":   keyRaw,
			"jobName":   pj.Name,
			"jobStatus": pj.Status,
		}
		logrus.WithError(err).WithFields(fields).Error("failed to report job")
		return c.retry(key, err)
	}
	logrus.WithField("prowjob", keyRaw).Info("Updated job, now will update pj")
	for _, pjob := range pjs {
		if err := c.updateReportState(pjob); err != nil {
			logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to update report state")
			// theoretically patch should not have this issue, but in case:
			// it might be out-dated, try to re-fetch pj and try again
			updatedPJ, err := c.pjclientset.ProwV1().ProwJobs(pjob.Namespace).Get(pjob.Name, metav1.GetOptions{})
			if err != nil {
				logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to get prowjob from apiserver")
				c.queue.Forget(key)
				return true
			}
			if err := c.updateReportState(updatedPJ); err != nil {
				// shrug
				logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to update report state again, give up")
				c.queue.Forget(key)
				return true
			}
		}
		logrus.WithField("prowjob", keyRaw).Infof("Hunky Dory!, pj : %v, state : %s", pjob.Spec.Job, pjob.Status.State)
	}
	c.queue.Forget(key)
	return true
}
Crier: DeepCopy prowjob after getting it from the lister
Crier hands out ProwJob pointers that point to an entry in the lister.
Anything that manipulates these prowjobs directly manipulates data in the
lister, which may lead to all kinds of subtle issues.
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package crier reports finished prowjob status to git providers.
package crier
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/sirupsen/logrus"
v1 "k8s.io/test-infra/prow/apis/prowjobs/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
clientset "k8s.io/test-infra/prow/client/clientset/versioned"
pjinformers "k8s.io/test-infra/prow/client/informers/externalversions/prowjobs/v1"
)
// reportClient is the reporter-side interface crier drives: ShouldReport
// decides whether a prowjob concerns this reporter, Report performs the
// report, and GetName identifies the reporter for bookkeeping in
// Status.PrevReportStates.
type reportClient interface {
	Report(pj *v1.ProwJob) ([]*v1.ProwJob, error)
	GetName() string
	ShouldReport(pj *v1.ProwJob) bool
}

// Controller struct defines how a controller should encapsulate
// logging, client connectivity, informing (list and watching)
// queueing, and handling of resource changes
type Controller struct {
	pjclientset clientset.Interface             // writes status patches back to the API server
	queue       workqueue.RateLimitingInterface // work queue of namespace/name keys
	informer    pjinformers.ProwJobInformer     // watches and caches ProwJobs
	reporter    reportClient                    // destination-specific reporter
	numWorkers  int                             // number of concurrent queue workers
	wg          *sync.WaitGroup                 // tracks running workers for shutdown
}

// NewController constructs a new instance of the crier controller.
func NewController(
	pjclientset clientset.Interface,
	queue workqueue.RateLimitingInterface,
	informer pjinformers.ProwJobInformer,
	reporter reportClient,
	numWorkers int) *Controller {
	return &Controller{
		pjclientset: pjclientset,
		queue:       queue,
		informer:    informer,
		reporter:    reporter,
		numWorkers:  numWorkers,
		wg:          &sync.WaitGroup{},
	}
}
// Run is the main path of execution for the controller loop.
// It registers event handlers on the informer, waits for the cache to sync,
// starts numWorkers queue workers, and blocks until ctx is cancelled, at
// which point it shuts the queue down and waits for workers to drain.
func (c *Controller) Run(ctx context.Context) {
	// handle a panic with logging and exiting
	defer utilruntime.HandleCrash()
	logrus.Info("Initiating controller")
	// Enqueue the namespace/name key for every add and update event.
	c.informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			logrus.WithField("prowjob", key).Debug("Add prowjob")
			if err != nil {
				logrus.WithError(err).Error("Cannot get key from object meta")
				return
			}
			c.queue.AddRateLimited(key)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(newObj)
			logrus.WithField("prowjob", key).Debug("Update prowjob")
			if err != nil {
				logrus.WithError(err).Error("Cannot get key from object meta")
				return
			}
			c.queue.AddRateLimited(key)
		},
	})
	// run the informer to start listing and watching resources
	go c.informer.Informer().Run(ctx.Done())
	// do the initial synchronization (one time) to populate resources
	if !cache.WaitForCacheSync(ctx.Done(), c.HasSynced) {
		utilruntime.HandleError(fmt.Errorf("Error syncing cache"))
		return
	}
	logrus.Info("Controller.Run: cache sync complete")
	// run the runWorker method every second with a stop channel
	for i := 0; i < c.numWorkers; i++ {
		go wait.Until(c.runWorker, time.Second, ctx.Done())
	}
	logrus.Infof("Started %d workers", c.numWorkers)
	<-ctx.Done()
	logrus.Info("Shutting down workers")
	// ignore new items in the queue but when all goroutines
	// have completed existing items then shutdown
	c.queue.ShutDown()
	c.wg.Wait()
}

// HasSynced allows us to satisfy the Controller interface by wiring up the informer's HasSynced
// method to it.
func (c *Controller) HasSynced() bool {
	return c.informer.Informer().HasSynced()
}

// runWorker executes the loop to process new items added to the queue.
// NOTE(review): wg.Add(1) runs inside the worker goroutine itself; if the
// context is cancelled before a worker is scheduled, Run's wg.Wait() can
// return before that worker registers — confirm whether this race matters.
func (c *Controller) runWorker() {
	c.wg.Add(1)
	for c.processNextItem() {
	}
	c.wg.Done()
}
// retry re-queues key for another attempt unless it has already been
// requeued five times, in which case it is dropped. It always returns true
// so the worker loop keeps running.
func (c *Controller) retry(key interface{}, err error) bool {
	keyRaw := key.(string)
	if c.queue.NumRequeues(key) < 5 {
		logrus.WithError(err).WithField("prowjob", keyRaw).Info("Failed processing item, retrying")
		c.queue.AddRateLimited(key)
	} else {
		logrus.WithError(err).WithField("prowjob", keyRaw).Error("Failed processing item, no more retries")
		c.queue.Forget(key)
	}
	return true
}
// updateReportState records, via a JSON merge patch, that this reporter has
// reported the prowjob's current state (in Status.PrevReportStates), then
// blocks until the update is visible in the local lister.
func (c *Controller) updateReportState(pj *v1.ProwJob) error {
	pjData, err := json.Marshal(pj)
	if err != nil {
		return fmt.Errorf("error marshal pj: %v", err)
	}
	// update pj report status
	newpj := pj.DeepCopy()
	// we set omitempty on PrevReportStates, so here we need to init it if is nil
	if newpj.Status.PrevReportStates == nil {
		newpj.Status.PrevReportStates = map[string]v1.ProwJobState{}
	}
	newpj.Status.PrevReportStates[c.reporter.GetName()] = newpj.Status.State
	newpjData, err := json.Marshal(newpj)
	if err != nil {
		return fmt.Errorf("error marshal new pj: %v", err)
	}
	patch, err := jsonpatch.CreateMergePatch(pjData, newpjData)
	if err != nil {
		return fmt.Errorf("error CreateMergePatch: %v", err)
	}
	if len(patch) == 0 {
		logrus.Warnf("Empty merge patch: pjData: %s, newpjData: %s", string(pjData), string(newpjData))
	}
	logrus.Infof("Created merge patch: %v", string(patch))
	_, err = c.pjclientset.ProwV1().ProwJobs(pj.Namespace).Patch(pj.Name, types.MergePatchType, patch)
	if err != nil {
		return err
	}
	// Block until the update is in the lister to make sure that events from another controller
	// that also does reporting dont trigger another report because our lister doesn't yet contain
	// the updated Status
	if err := wait.Poll(time.Second, 3*time.Second, func() (bool, error) {
		listed, err := c.informer.Lister().ProwJobs(newpj.Namespace).Get(newpj.Name)
		if err != nil {
			return false, err
		}
		// BUGFIX: compare the LISTED object's recorded state against the one
		// we just wrote. The previous code compared newpj against itself,
		// which is trivially true, so the poll succeeded as soon as the
		// listed prowjob had any non-nil PrevReportStates.
		if listed.Status.PrevReportStates != nil &&
			listed.Status.PrevReportStates[c.reporter.GetName()] == newpj.Status.State {
			return true, nil
		}
		return false, nil
	}); err != nil {
		return fmt.Errorf("failed to wait for updated report status to be in lister: %v", err)
	}
	return nil
}
// processNextItem retrieves each queued item and takes the necessary handler action based off of if
// the item was created or deleted. It returns false only when the queue has
// been shut down, which terminates the worker loop.
func (c *Controller) processNextItem() bool {
	key, quit := c.queue.Get()
	if quit {
		logrus.Debug("Queue already shut down, exiting processNextItem")
		return false
	}
	defer c.queue.Done(key)
	// assert the string out of the key (format `namespace/name`)
	keyRaw := key.(string)
	logrus.WithField("key", keyRaw).Debug("processing next key")
	namespace, name, err := cache.SplitMetaNamespaceKey(keyRaw)
	if err != nil {
		logrus.WithError(err).WithField("prowjob", keyRaw).Error("invalid resource key")
		c.queue.Forget(key)
		return true
	}
	// take the string key and get the object out of the indexer
	//
	// item will contain the complex object for the resource and
	// exists is a bool that'll indicate whether or not the
	// resource was created (true) or deleted (false)
	//
	// if there is an error in getting the key from the index
	// then we want to retry this particular queue key a certain
	// number of times (5 here) before we forget the queue key
	// and throw an error
	readOnlyPJ, err := c.informer.Lister().ProwJobs(namespace).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			logrus.WithField("prowjob", keyRaw).Info("object no longer exist")
			c.queue.Forget(key)
			return true
		}
		return c.retry(key, err)
	}
	// The lister returns a pointer into its shared cache; deep-copy before
	// any mutation so we never corrupt the cached object.
	pj := readOnlyPJ.DeepCopy()
	// not belong to the current reporter
	if !pj.Spec.Report || !c.reporter.ShouldReport(pj) {
		c.queue.Forget(key)
		return true
	}
	// we set omitempty on PrevReportStates, so here we need to init it if is nil
	if pj.Status.PrevReportStates == nil {
		pj.Status.PrevReportStates = map[string]v1.ProwJobState{}
	}
	// already reported current state
	if pj.Status.PrevReportStates[c.reporter.GetName()] == pj.Status.State {
		logrus.WithField("prowjob", keyRaw).Info("Already reported")
		c.queue.Forget(key)
		return true
	}
	logrus.WithField("prowjob", keyRaw).Infof("Will report state : %s", pj.Status.State)
	pjs, err := c.reporter.Report(pj)
	if err != nil {
		fields := logrus.Fields{
			"prowjob":   keyRaw,
			"jobName":   pj.Name,
			"jobStatus": pj.Status,
		}
		logrus.WithError(err).WithFields(fields).Error("failed to report job")
		return c.retry(key, err)
	}
	logrus.WithField("prowjob", keyRaw).Info("Updated job, now will update pj")
	for _, pjob := range pjs {
		if err := c.updateReportState(pjob); err != nil {
			logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to update report state")
			// theoretically patch should not have this issue, but in case:
			// it might be out-dated, try to re-fetch pj and try again
			updatedPJ, err := c.pjclientset.ProwV1().ProwJobs(pjob.Namespace).Get(pjob.Name, metav1.GetOptions{})
			if err != nil {
				logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to get prowjob from apiserver")
				c.queue.Forget(key)
				return true
			}
			if err := c.updateReportState(updatedPJ); err != nil {
				// shrug
				logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to update report state again, give up")
				c.queue.Forget(key)
				return true
			}
		}
		logrus.WithField("prowjob", keyRaw).Infof("Hunky Dory!, pj : %v, state : %s", pjob.Spec.Job, pjob.Status.State)
	}
	c.queue.Forget(key)
	return true
}
|
package errdefs // import "github.com/docker/docker/errdefs"
import "context"
// errNotFound wraps an error and tags it as the "not found" class via the
// marker method NotFound.
type errNotFound struct{ error }

// NotFound is the class marker; it carries no behavior of its own.
func (errNotFound) NotFound() {}

// Cause returns the wrapped error.
func (e errNotFound) Cause() error {
	return e.error
}

// NotFound is a helper to create an error of the class with the same name from any error type
func NotFound(err error) error {
	if err != nil {
		return errNotFound{err}
	}
	return nil
}
// errInvalidParameter wraps an error and tags it as the "invalid parameter"
// class via the marker method InvalidParameter.
type errInvalidParameter struct{ error }

func (errInvalidParameter) InvalidParameter() {}

// Cause returns the wrapped error.
func (e errInvalidParameter) Cause() error {
	return e.error
}

// InvalidParameter is a helper to create an error of the class with the same name from any error type
func InvalidParameter(err error) error {
	if err == nil {
		return nil
	}
	return errInvalidParameter{err}
}

// errConflict wraps an error and tags it as the "conflict" class via the
// marker method Conflict.
type errConflict struct{ error }

func (errConflict) Conflict() {}

// Cause returns the wrapped error.
func (e errConflict) Cause() error {
	return e.error
}

// Conflict is a helper to create an error of the class with the same name from any error type
func Conflict(err error) error {
	if err == nil {
		return nil
	}
	return errConflict{err}
}

// errUnauthorized wraps an error and tags it as the "unauthorized" class via
// the marker method Unauthorized.
type errUnauthorized struct{ error }

func (errUnauthorized) Unauthorized() {}

// Cause returns the wrapped error.
func (e errUnauthorized) Cause() error {
	return e.error
}

// Unauthorized is a helper to create an error of the class with the same name from any error type
func Unauthorized(err error) error {
	if err == nil {
		return nil
	}
	return errUnauthorized{err}
}
// errUnavailable wraps an error and tags it as the "unavailable" class via
// the marker method Unavailable.
type errUnavailable struct{ error }

func (errUnavailable) Unavailable() {}

// Cause returns the wrapped error.
func (e errUnavailable) Cause() error {
	return e.error
}

// Unavailable is a helper to create an error of the class with the same name from any error type
func Unavailable(err error) error {
	// Guard against wrapping nil: without this check, Unavailable(nil) would
	// return a non-nil error holding a nil cause, unlike every other helper
	// in this package.
	if err == nil {
		return nil
	}
	return errUnavailable{err}
}
// errForbidden wraps an error and tags it as the "forbidden" class via the
// marker method Forbidden.
type errForbidden struct{ error }

func (errForbidden) Forbidden() {}

// Cause returns the wrapped error.
func (e errForbidden) Cause() error {
	return e.error
}

// Forbidden is a helper to create an error of the class with the same name from any error type
func Forbidden(err error) error {
	if err == nil {
		return nil
	}
	return errForbidden{err}
}

// errSystem wraps an error and tags it as the "system" class via the marker
// method System.
type errSystem struct{ error }

func (errSystem) System() {}

// Cause returns the wrapped error.
func (e errSystem) Cause() error {
	return e.error
}

// System is a helper to create an error of the class with the same name from any error type
func System(err error) error {
	if err == nil {
		return nil
	}
	return errSystem{err}
}

// errNotModified wraps an error and tags it as the "not modified" class via
// the marker method NotModified.
type errNotModified struct{ error }

func (errNotModified) NotModified() {}

// Cause returns the wrapped error.
func (e errNotModified) Cause() error {
	return e.error
}

// NotModified is a helper to create an error of the class with the same name from any error type
func NotModified(err error) error {
	if err == nil {
		return nil
	}
	return errNotModified{err}
}

// errAlreadyExists wraps an error and tags it as the "already exists" class
// via the marker method AlreadyExists.
type errAlreadyExists struct{ error }

func (errAlreadyExists) AlreadyExists() {}

// Cause returns the wrapped error.
func (e errAlreadyExists) Cause() error {
	return e.error
}

// AlreadyExists is a helper to create an error of the class with the same name from any error type
func AlreadyExists(err error) error {
	if err == nil {
		return nil
	}
	return errAlreadyExists{err}
}
// errNotImplemented wraps an error and tags it as the "not implemented"
// class via the marker method NotImplemented.
type errNotImplemented struct{ error }

func (errNotImplemented) NotImplemented() {}

// Cause returns the wrapped error.
func (e errNotImplemented) Cause() error {
	return e.error
}

// NotImplemented is a helper to create an error of the class with the same name from any error type
func NotImplemented(err error) error {
	if err == nil {
		return nil
	}
	return errNotImplemented{err}
}

// errUnknown wraps an error and tags it as the "unknown" class via the
// marker method Unknown.
type errUnknown struct{ error }

func (errUnknown) Unknown() {}

// Cause returns the wrapped error.
func (e errUnknown) Cause() error {
	return e.error
}

// Unknown is a helper to create an error of the class with the same name from any error type
func Unknown(err error) error {
	if err == nil {
		return nil
	}
	return errUnknown{err}
}

// errCancelled wraps an error and tags it as the "cancelled" class via the
// marker method Cancelled.
type errCancelled struct{ error }

func (errCancelled) Cancelled() {}

// Cause returns the wrapped error.
func (e errCancelled) Cause() error {
	return e.error
}

// Cancelled is a helper to create an error of the class with the same name from any error type
func Cancelled(err error) error {
	if err == nil {
		return nil
	}
	return errCancelled{err}
}

// errDeadline wraps an error and tags it as the "deadline exceeded" class
// via the marker method DeadlineExceeded.
type errDeadline struct{ error }

func (errDeadline) DeadlineExceeded() {}

// Cause returns the wrapped error.
func (e errDeadline) Cause() error {
	return e.error
}

// Deadline is a helper to create an error of the class with the same name from any error type
func Deadline(err error) error {
	if err == nil {
		return nil
	}
	return errDeadline{err}
}

// errDataLoss wraps an error and tags it as the "data loss" class via the
// marker method DataLoss.
type errDataLoss struct{ error }

func (errDataLoss) DataLoss() {}

// Cause returns the wrapped error.
func (e errDataLoss) Cause() error {
	return e.error
}

// DataLoss is a helper to create an error of the class with the same name from any error type
func DataLoss(err error) error {
	if err == nil {
		return nil
	}
	return errDataLoss{err}
}
// FromContext returns the error class from the passed in context
func FromContext(ctx context.Context) error {
e := ctx.Err()
if e == nil {
return nil
}
if e == context.Canceled {
return Cancelled(e)
}
if e == context.DeadlineExceeded {
return Deadline(e)
}
return Unknown(e)
}
Add missing nil-check on errdefs.Unavailable()
Signed-off-by: Sebastiaan van Stijn <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@gone.nl>
package errdefs // import "github.com/docker/docker/errdefs"
import "context"
type errNotFound struct{ error }
func (errNotFound) NotFound() {}
func (e errNotFound) Cause() error {
return e.error
}
// NotFound is a helper to create an error of the class with the same name from any error type
func NotFound(err error) error {
if err == nil {
return nil
}
return errNotFound{err}
}
type errInvalidParameter struct{ error }
func (errInvalidParameter) InvalidParameter() {}
func (e errInvalidParameter) Cause() error {
return e.error
}
// InvalidParameter is a helper to create an error of the class with the same name from any error type
func InvalidParameter(err error) error {
if err == nil {
return nil
}
return errInvalidParameter{err}
}
type errConflict struct{ error }
func (errConflict) Conflict() {}
func (e errConflict) Cause() error {
return e.error
}
// Conflict is a helper to create an error of the class with the same name from any error type
func Conflict(err error) error {
if err == nil {
return nil
}
return errConflict{err}
}
type errUnauthorized struct{ error }
func (errUnauthorized) Unauthorized() {}
func (e errUnauthorized) Cause() error {
return e.error
}
// Unauthorized is a helper to create an error of the class with the same name from any error type
func Unauthorized(err error) error {
if err == nil {
return nil
}
return errUnauthorized{err}
}
type errUnavailable struct{ error }
func (errUnavailable) Unavailable() {}
func (e errUnavailable) Cause() error {
return e.error
}
// Unavailable is a helper to create an error of the class with the same name from any error type
func Unavailable(err error) error {
if err == nil {
return nil
}
return errUnavailable{err}
}
type errForbidden struct{ error }
func (errForbidden) Forbidden() {}
func (e errForbidden) Cause() error {
return e.error
}
// Forbidden is a helper to create an error of the class with the same name from any error type
func Forbidden(err error) error {
if err == nil {
return nil
}
return errForbidden{err}
}
type errSystem struct{ error }
func (errSystem) System() {}
func (e errSystem) Cause() error {
return e.error
}
// System is a helper to create an error of the class with the same name from any error type
func System(err error) error {
if err == nil {
return nil
}
return errSystem{err}
}
type errNotModified struct{ error }
func (errNotModified) NotModified() {}
func (e errNotModified) Cause() error {
return e.error
}
// NotModified is a helper to create an error of the class with the same name from any error type
func NotModified(err error) error {
if err == nil {
return nil
}
return errNotModified{err}
}
type errAlreadyExists struct{ error }
func (errAlreadyExists) AlreadyExists() {}
func (e errAlreadyExists) Cause() error {
return e.error
}
// AlreadyExists is a helper to create an error of the class with the same name from any error type
func AlreadyExists(err error) error {
if err == nil {
return nil
}
return errAlreadyExists{err}
}
type errNotImplemented struct{ error }
func (errNotImplemented) NotImplemented() {}
func (e errNotImplemented) Cause() error {
return e.error
}
// NotImplemented is a helper to create an error of the class with the same name from any error type
func NotImplemented(err error) error {
if err == nil {
return nil
}
return errNotImplemented{err}
}
type errUnknown struct{ error }
func (errUnknown) Unknown() {}
func (e errUnknown) Cause() error {
return e.error
}
// Unknown is a helper to create an error of the class with the same name from any error type
func Unknown(err error) error {
if err == nil {
return nil
}
return errUnknown{err}
}
type errCancelled struct{ error }
func (errCancelled) Cancelled() {}
func (e errCancelled) Cause() error {
return e.error
}
// Cancelled is a helper to create an error of the class with the same name from any error type
func Cancelled(err error) error {
if err == nil {
return nil
}
return errCancelled{err}
}
type errDeadline struct{ error }
func (errDeadline) DeadlineExceeded() {}
func (e errDeadline) Cause() error {
return e.error
}
// Deadline is a helper to create an error of the class with the same name from any error type
func Deadline(err error) error {
if err == nil {
return nil
}
return errDeadline{err}
}
type errDataLoss struct{ error }
func (errDataLoss) DataLoss() {}
func (e errDataLoss) Cause() error {
return e.error
}
// DataLoss is a helper to create an error of the class with the same name from any error type
func DataLoss(err error) error {
if err == nil {
return nil
}
return errDataLoss{err}
}
// FromContext returns the error class from the passed in context
func FromContext(ctx context.Context) error {
e := ctx.Err()
if e == nil {
return nil
}
if e == context.Canceled {
return Cancelled(e)
}
if e == context.DeadlineExceeded {
return Deadline(e)
}
return Unknown(e)
}
|
package esa
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
)
// Stub is a test helper: it spins up an httptest.Server that always
// responds with the contents of filename, and returns it together with a
// Client pointed at that server. The same fixture is unmarshalled into
// outRes so callers can compare the decoded expectation against API
// results. Any read or decode failure aborts the test binary (log.Fatalln).
func Stub(filename string, outRes interface{}) (*httptest.Server, *Client) {
	// Read the fixture once and reuse it for both the response body and
	// the decoded expectation (the original read the same file twice).
	stub, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatalln(err)
	}

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write(stub) // stub is already []byte; no conversion needed
	}))

	c := NewClient("")
	c.baseURL = ts.URL

	if err := json.Unmarshal(stub, outRes); err != nil {
		log.Fatalln(err)
	}

	return ts, c
}
:green_heart: add http status code to Stub
package esa
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
)
// Stub is a test helper: it spins up an httptest.Server that responds with
// the contents of filename and a method-appropriate status code, and
// returns it together with a Client pointed at that server. The same
// fixture is unmarshalled into outRes so callers can compare the decoded
// expectation against API results. Any read or decode failure aborts the
// test binary (log.Fatalln).
func Stub(filename string, outRes interface{}) (*httptest.Server, *Client) {
	// Read the fixture once and reuse it for both the response body and
	// the decoded expectation (the original read the same file twice).
	stub, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatalln(err)
	}

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Mirror the API's status codes per HTTP method:
		// GET/PATCH (and anything else) -> 200, POST -> 201, DELETE -> 204.
		statusCode := http.StatusOK
		switch r.Method {
		case http.MethodPost:
			statusCode = http.StatusCreated
		case http.MethodDelete:
			statusCode = http.StatusNoContent
		}
		w.WriteHeader(statusCode)
		w.Write(stub) // stub is already []byte; no conversion needed
	}))

	c := NewClient("")
	c.baseURL = ts.URL

	if err := json.Unmarshal(stub, outRes); err != nil {
		log.Fatalln(err)
	}

	return ts, c
}
|
// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package web
import (
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/mattermost/platform/api"
"github.com/mattermost/platform/model"
"github.com/mattermost/platform/store"
"github.com/mattermost/platform/utils"
)
// ApiClient is the shared client used by every test in this package; it is
// initialized by Setup().
var ApiClient *model.Client

// URL is the base address of the test server, initialized by Setup().
var URL string
// Setup boots the full platform stack (config, server, API routes, web
// routes) once per test binary; later calls are no-ops because api.Srv
// survives between tests. Initialization order matters: the server must
// exist before routes are attached and before the client URL is derived.
func Setup() {
	if api.Srv == nil {
		utils.LoadConfig("config.json")
		api.NewServer()
		api.StartServer()
		api.InitApi()
		InitWeb()
		URL = "http://localhost" + utils.Cfg.ServiceSettings.ListenAddress
		ApiClient = model.NewClient(URL)
		// Record in the store that unit tests ran against this instance.
		api.Srv.Store.MarkSystemRanUnitTests()
	}
}
// TearDown stops the shared test server if Setup ever started one.
func TearDown() {
	if api.Srv == nil {
		return
	}
	api.StopServer()
}
// TestStatic verifies that the web server serves a known static asset.
func TestStatic(t *testing.T) {
	Setup()

	// add a short delay to make sure the server is ready to receive requests
	time.Sleep(1 * time.Second)

	resp, err := http.Get(URL + "/static/images/favicon-16x16.png")
	if err != nil {
		t.Fatalf("got error while trying to get static files %v", err)
	}
	// Close the body so the underlying connection can be reused
	// (the original leaked it).
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		t.Fatalf("couldn't get static files %v", resp.StatusCode)
	}
}
// TestGetAccessToken exercises the OAuth2 token endpoint end to end: every
// invalid combination of grant type, client id/secret, code, and redirect
// URI must be rejected; a valid request must yield a usable access token;
// and the same auth code must not be redeemable twice. When the OAuth
// provider is disabled in the config, any token request must fail.
func TestGetAccessToken(t *testing.T) {
	Setup()

	// Fixtures: a team and a verified user to act as the resource owner.
	team := model.Team{DisplayName: "Name", Name: "z-z-" + model.NewId() + "a", Email: "test@nowhere.com", Type: model.TEAM_OPEN}
	rteam, _ := ApiClient.CreateTeam(&team)

	user := model.User{TeamId: rteam.Data.(*model.Team).Id, Email: strings.ToLower(model.NewId()) + "corey+test@test.com", Password: "pwd"}
	ruser := ApiClient.Must(ApiClient.CreateUser(&user, "")).Data.(*model.User)
	store.Must(api.Srv.Store.User().VerifyEmail(ruser.Id))

	app := &model.OAuthApp{Name: "TestApp" + model.NewId(), Homepage: "https://nowhere.com", Description: "test", CallbackUrls: []string{"https://nowhere.com"}}

	if !utils.Cfg.ServiceSettings.EnableOAuthServiceProvider {
		// Provider disabled: the endpoint must reject everything.
		data := url.Values{"grant_type": []string{"junk"}, "client_id": []string{"12345678901234567890123456"}, "client_secret": []string{"12345678901234567890123456"}, "code": []string{"junk"}, "redirect_uri": []string{app.CallbackUrls[0]}}

		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - oauth providing turned off")
		}
	} else {
		// Register the app and walk the authorize flow to get a real code.
		ApiClient.Must(ApiClient.LoginById(ruser.Id, "pwd"))
		app = ApiClient.Must(ApiClient.RegisterApp(app)).Data.(*model.OAuthApp)

		redirect := ApiClient.Must(ApiClient.AllowOAuth(model.AUTHCODE_RESPONSE_TYPE, app.Id, app.CallbackUrls[0], "all", "123")).Data.(map[string]string)["redirect"]
		rurl, _ := url.Parse(redirect)

		ApiClient.Logout()

		// Break one field at a time; the order matters because each step
		// repairs the previous field before corrupting the next one.
		data := url.Values{"grant_type": []string{"junk"}, "client_id": []string{app.Id}, "client_secret": []string{app.ClientSecret}, "code": []string{rurl.Query().Get("code")}, "redirect_uri": []string{app.CallbackUrls[0]}}

		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad grant type")
		}

		data.Set("grant_type", model.ACCESS_TOKEN_GRANT_TYPE)
		data.Set("client_id", "")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - missing client id")
		}
		data.Set("client_id", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad client id")
		}

		data.Set("client_id", app.Id)
		data.Set("client_secret", "")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - missing client secret")
		}

		data.Set("client_secret", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad client secret")
		}

		data.Set("client_secret", app.ClientSecret)
		data.Set("code", "")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - missing code")
		}

		data.Set("code", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad code")
		}

		data.Set("code", rurl.Query().Get("code"))
		data.Set("redirect_uri", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - non-matching redirect uri")
		}

		// reset data for successful request
		data.Set("grant_type", model.ACCESS_TOKEN_GRANT_TYPE)
		data.Set("client_id", app.Id)
		data.Set("client_secret", app.ClientSecret)
		data.Set("code", rurl.Query().Get("code"))
		data.Set("redirect_uri", app.CallbackUrls[0])

		token := ""
		if result, err := ApiClient.GetAccessToken(data); err != nil {
			t.Fatal(err)
		} else {
			rsp := result.Data.(*model.AccessResponse)
			if len(rsp.AccessToken) == 0 {
				t.Fatal("access token not returned")
			} else {
				token = rsp.AccessToken
			}
			if rsp.TokenType != model.ACCESS_TOKEN_TYPE {
				t.Fatal("access token type incorrect")
			}
		}

		// The token must be accepted as a query parameter...
		if result, err := ApiClient.DoApiGet("/users/profiles?access_token="+token, "", ""); err != nil {
			t.Fatal(err)
		} else {
			userMap := model.UserMapFromJson(result.Body)
			if len(userMap) == 0 {
				t.Fatal("user map empty - did not get results correctly")
			}
		}

		if _, err := ApiClient.DoApiGet("/users/profiles", "", ""); err == nil {
			t.Fatal("should have failed - no access token provided")
		}

		if _, err := ApiClient.DoApiGet("/users/profiles?access_token=junk", "", ""); err == nil {
			t.Fatal("should have failed - bad access token provided")
		}

		// ...and via the client's stored OAuth token.
		ApiClient.SetOAuthToken(token)
		if result, err := ApiClient.DoApiGet("/users/profiles", "", ""); err != nil {
			t.Fatal(err)
		} else {
			userMap := model.UserMapFromJson(result.Body)
			if len(userMap) == 0 {
				t.Fatal("user map empty - did not get results correctly")
			}
		}

		// Redeeming the same auth code a second time must fail.
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - tried to reuse auth code")
		}

		ApiClient.ClearOAuthToken()
	}
}
// TestIncomingWebhook posts payloads to an incoming webhook endpoint:
// valid text must succeed, while empty text, an unknown channel override,
// and an unknown hook id must all be rejected. With webhooks disabled in
// the config, any post must fail.
func TestIncomingWebhook(t *testing.T) {
	Setup()

	// Fixtures: team, verified user (logged in), and a channel to post into.
	team := &model.Team{DisplayName: "Name", Name: "z-z-" + model.NewId() + "a", Email: "test@nowhere.com", Type: model.TEAM_OPEN}
	team = ApiClient.Must(ApiClient.CreateTeam(team)).Data.(*model.Team)

	user := &model.User{TeamId: team.Id, Email: model.NewId() + "corey+test@test.com", Nickname: "Corey Hulen", Password: "pwd"}
	user = ApiClient.Must(ApiClient.CreateUser(user, "")).Data.(*model.User)
	store.Must(api.Srv.Store.User().VerifyEmail(user.Id))

	ApiClient.LoginByEmail(team.Name, user.Email, "pwd")

	channel1 := &model.Channel{DisplayName: "Test API Name", Name: "a" + model.NewId() + "a", Type: model.CHANNEL_OPEN, TeamId: team.Id}
	channel1 = ApiClient.Must(ApiClient.CreateChannel(channel1)).Data.(*model.Channel)

	if utils.Cfg.ServiceSettings.EnableIncomingWebhooks {
		hook1 := &model.IncomingWebhook{ChannelId: channel1.Id}
		hook1 = ApiClient.Must(ApiClient.CreateIncomingWebhook(hook1)).Data.(*model.IncomingWebhook)

		// Happy path: a plain text payload posts successfully.
		payload := "payload={\"text\": \"test text\"}"
		if _, err := ApiClient.PostToWebhook(hook1.Id, payload); err != nil {
			t.Fatal(err)
		}

		payload = "payload={\"text\": \"\"}"
		if _, err := ApiClient.PostToWebhook(hook1.Id, payload); err == nil {
			t.Fatal("should have errored - no text to post")
		}

		payload = "payload={\"text\": \"test text\", \"channel\": \"junk\"}"
		if _, err := ApiClient.PostToWebhook(hook1.Id, payload); err == nil {
			t.Fatal("should have errored - bad channel")
		}

		payload = "payload={\"text\": \"test text\"}"
		if _, err := ApiClient.PostToWebhook("abc123", payload); err == nil {
			t.Fatal("should have errored - bad hook")
		}
	} else {
		if _, err := ApiClient.PostToWebhook("123", "123"); err == nil {
			t.Fatal("should have failed - webhooks turned off")
		}
	}
}
// TestZZWebTearDown stops the shared server. The ZZ prefix keeps it last
// among the tests in this package (they run sorted by name), and per the
// original note it must stay in the alphabetically last file that calls
// Setup().
func TestZZWebTearDown(t *testing.T) {
	// Let in-flight work from earlier tests drain before shutting down.
	time.Sleep(2 * time.Second)
	TearDown()
}
Updating favicon file
// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package web
import (
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/mattermost/platform/api"
"github.com/mattermost/platform/model"
"github.com/mattermost/platform/store"
"github.com/mattermost/platform/utils"
)
// ApiClient is the shared client used by every test in this package; it is
// initialized by Setup().
var ApiClient *model.Client

// URL is the base address of the test server, initialized by Setup().
var URL string
// Setup boots the full platform stack (config, server, API routes, web
// routes) once per test binary; later calls are no-ops because api.Srv
// survives between tests. Initialization order matters: the server must
// exist before routes are attached and before the client URL is derived.
func Setup() {
	if api.Srv == nil {
		utils.LoadConfig("config.json")
		api.NewServer()
		api.StartServer()
		api.InitApi()
		InitWeb()
		URL = "http://localhost" + utils.Cfg.ServiceSettings.ListenAddress
		ApiClient = model.NewClient(URL)
		// Record in the store that unit tests ran against this instance.
		api.Srv.Store.MarkSystemRanUnitTests()
	}
}
// TearDown stops the shared test server if Setup ever started one.
func TearDown() {
	if api.Srv == nil {
		return
	}
	api.StopServer()
}
// TestStatic verifies that the web server serves a known static asset.
func TestStatic(t *testing.T) {
	Setup()

	// add a short delay to make sure the server is ready to receive requests
	time.Sleep(1 * time.Second)

	resp, err := http.Get(URL + "/static/images/favicon/favicon-16x16.png")
	if err != nil {
		t.Fatalf("got error while trying to get static files %v", err)
	}
	// Close the body so the underlying connection can be reused
	// (the original leaked it).
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		t.Fatalf("couldn't get static files %v", resp.StatusCode)
	}
}
// TestGetAccessToken exercises the OAuth2 token endpoint end to end: every
// invalid combination of grant type, client id/secret, code, and redirect
// URI must be rejected; a valid request must yield a usable access token;
// and the same auth code must not be redeemable twice. When the OAuth
// provider is disabled in the config, any token request must fail.
func TestGetAccessToken(t *testing.T) {
	Setup()

	// Fixtures: a team and a verified user to act as the resource owner.
	team := model.Team{DisplayName: "Name", Name: "z-z-" + model.NewId() + "a", Email: "test@nowhere.com", Type: model.TEAM_OPEN}
	rteam, _ := ApiClient.CreateTeam(&team)

	user := model.User{TeamId: rteam.Data.(*model.Team).Id, Email: strings.ToLower(model.NewId()) + "corey+test@test.com", Password: "pwd"}
	ruser := ApiClient.Must(ApiClient.CreateUser(&user, "")).Data.(*model.User)
	store.Must(api.Srv.Store.User().VerifyEmail(ruser.Id))

	app := &model.OAuthApp{Name: "TestApp" + model.NewId(), Homepage: "https://nowhere.com", Description: "test", CallbackUrls: []string{"https://nowhere.com"}}

	if !utils.Cfg.ServiceSettings.EnableOAuthServiceProvider {
		// Provider disabled: the endpoint must reject everything.
		data := url.Values{"grant_type": []string{"junk"}, "client_id": []string{"12345678901234567890123456"}, "client_secret": []string{"12345678901234567890123456"}, "code": []string{"junk"}, "redirect_uri": []string{app.CallbackUrls[0]}}

		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - oauth providing turned off")
		}
	} else {
		// Register the app and walk the authorize flow to get a real code.
		ApiClient.Must(ApiClient.LoginById(ruser.Id, "pwd"))
		app = ApiClient.Must(ApiClient.RegisterApp(app)).Data.(*model.OAuthApp)

		redirect := ApiClient.Must(ApiClient.AllowOAuth(model.AUTHCODE_RESPONSE_TYPE, app.Id, app.CallbackUrls[0], "all", "123")).Data.(map[string]string)["redirect"]
		rurl, _ := url.Parse(redirect)

		ApiClient.Logout()

		// Break one field at a time; the order matters because each step
		// repairs the previous field before corrupting the next one.
		data := url.Values{"grant_type": []string{"junk"}, "client_id": []string{app.Id}, "client_secret": []string{app.ClientSecret}, "code": []string{rurl.Query().Get("code")}, "redirect_uri": []string{app.CallbackUrls[0]}}

		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad grant type")
		}

		data.Set("grant_type", model.ACCESS_TOKEN_GRANT_TYPE)
		data.Set("client_id", "")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - missing client id")
		}
		data.Set("client_id", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad client id")
		}

		data.Set("client_id", app.Id)
		data.Set("client_secret", "")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - missing client secret")
		}

		data.Set("client_secret", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad client secret")
		}

		data.Set("client_secret", app.ClientSecret)
		data.Set("code", "")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - missing code")
		}

		data.Set("code", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - bad code")
		}

		data.Set("code", rurl.Query().Get("code"))
		data.Set("redirect_uri", "junk")
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - non-matching redirect uri")
		}

		// reset data for successful request
		data.Set("grant_type", model.ACCESS_TOKEN_GRANT_TYPE)
		data.Set("client_id", app.Id)
		data.Set("client_secret", app.ClientSecret)
		data.Set("code", rurl.Query().Get("code"))
		data.Set("redirect_uri", app.CallbackUrls[0])

		token := ""
		if result, err := ApiClient.GetAccessToken(data); err != nil {
			t.Fatal(err)
		} else {
			rsp := result.Data.(*model.AccessResponse)
			if len(rsp.AccessToken) == 0 {
				t.Fatal("access token not returned")
			} else {
				token = rsp.AccessToken
			}
			if rsp.TokenType != model.ACCESS_TOKEN_TYPE {
				t.Fatal("access token type incorrect")
			}
		}

		// The token must be accepted as a query parameter...
		if result, err := ApiClient.DoApiGet("/users/profiles?access_token="+token, "", ""); err != nil {
			t.Fatal(err)
		} else {
			userMap := model.UserMapFromJson(result.Body)
			if len(userMap) == 0 {
				t.Fatal("user map empty - did not get results correctly")
			}
		}

		if _, err := ApiClient.DoApiGet("/users/profiles", "", ""); err == nil {
			t.Fatal("should have failed - no access token provided")
		}

		if _, err := ApiClient.DoApiGet("/users/profiles?access_token=junk", "", ""); err == nil {
			t.Fatal("should have failed - bad access token provided")
		}

		// ...and via the client's stored OAuth token.
		ApiClient.SetOAuthToken(token)
		if result, err := ApiClient.DoApiGet("/users/profiles", "", ""); err != nil {
			t.Fatal(err)
		} else {
			userMap := model.UserMapFromJson(result.Body)
			if len(userMap) == 0 {
				t.Fatal("user map empty - did not get results correctly")
			}
		}

		// Redeeming the same auth code a second time must fail.
		if _, err := ApiClient.GetAccessToken(data); err == nil {
			t.Fatal("should have failed - tried to reuse auth code")
		}

		ApiClient.ClearOAuthToken()
	}
}
// TestIncomingWebhook posts payloads to an incoming webhook endpoint:
// valid text must succeed, while empty text, an unknown channel override,
// and an unknown hook id must all be rejected. With webhooks disabled in
// the config, any post must fail.
func TestIncomingWebhook(t *testing.T) {
	Setup()

	// Fixtures: team, verified user (logged in), and a channel to post into.
	team := &model.Team{DisplayName: "Name", Name: "z-z-" + model.NewId() + "a", Email: "test@nowhere.com", Type: model.TEAM_OPEN}
	team = ApiClient.Must(ApiClient.CreateTeam(team)).Data.(*model.Team)

	user := &model.User{TeamId: team.Id, Email: model.NewId() + "corey+test@test.com", Nickname: "Corey Hulen", Password: "pwd"}
	user = ApiClient.Must(ApiClient.CreateUser(user, "")).Data.(*model.User)
	store.Must(api.Srv.Store.User().VerifyEmail(user.Id))

	ApiClient.LoginByEmail(team.Name, user.Email, "pwd")

	channel1 := &model.Channel{DisplayName: "Test API Name", Name: "a" + model.NewId() + "a", Type: model.CHANNEL_OPEN, TeamId: team.Id}
	channel1 = ApiClient.Must(ApiClient.CreateChannel(channel1)).Data.(*model.Channel)

	if utils.Cfg.ServiceSettings.EnableIncomingWebhooks {
		hook1 := &model.IncomingWebhook{ChannelId: channel1.Id}
		hook1 = ApiClient.Must(ApiClient.CreateIncomingWebhook(hook1)).Data.(*model.IncomingWebhook)

		// Happy path: a plain text payload posts successfully.
		payload := "payload={\"text\": \"test text\"}"
		if _, err := ApiClient.PostToWebhook(hook1.Id, payload); err != nil {
			t.Fatal(err)
		}

		payload = "payload={\"text\": \"\"}"
		if _, err := ApiClient.PostToWebhook(hook1.Id, payload); err == nil {
			t.Fatal("should have errored - no text to post")
		}

		payload = "payload={\"text\": \"test text\", \"channel\": \"junk\"}"
		if _, err := ApiClient.PostToWebhook(hook1.Id, payload); err == nil {
			t.Fatal("should have errored - bad channel")
		}

		payload = "payload={\"text\": \"test text\"}"
		if _, err := ApiClient.PostToWebhook("abc123", payload); err == nil {
			t.Fatal("should have errored - bad hook")
		}
	} else {
		if _, err := ApiClient.PostToWebhook("123", "123"); err == nil {
			t.Fatal("should have failed - webhooks turned off")
		}
	}
}
// TestZZWebTearDown stops the shared server. The ZZ prefix keeps it last
// among the tests in this package (they run sorted by name), and per the
// original note it must stay in the alphabetically last file that calls
// Setup().
func TestZZWebTearDown(t *testing.T) {
	// Let in-flight work from earlier tests drain before shutting down.
	time.Sleep(2 * time.Second)
	TearDown()
}
|
package ethutil
import (
_ "fmt"
"math/big"
_ "regexp"
)
// OpCodes maps assembler mnemonics to their EVM byte values.
var OpCodes = map[string]byte{
	// 0x0 range - arithmetic ops
	"STOP": 0x00,
	"ADD":  0x01,
	"MUL":  0x02,
	"SUB":  0x03,
	"DIV":  0x04,
	"SDIV": 0x05,
	"MOD":  0x06,
	"SMOD": 0x07,
	"EXP":  0x08,
	"NEG":  0x09,
	"LT":   0x0a,
	"GT":   0x0b,
	"EQ":   0x0c,
	"NOT":  0x0d,

	// 0x10 range - bit ops
	"AND":  0x10,
	"OR":   0x11,
	"XOR":  0x12,
	"BYTE": 0x13,

	// 0x20 range - crypto
	"SHA3": 0x20,

	// 0x30 range - closure state
	"ADDRESS":      0x30,
	"BALANCE":      0x31,
	"ORIGIN":       0x32,
	"CALLER":       0x33,
	"CALLVALUE":    0x34,
	"CALLDATALOAD": 0x35, // canonical mnemonic for 0x35
	"CALLDATA":     0x35, // legacy alias, kept for backward compatibility
	"CALLDATASIZE": 0x36,
	// NOTE(review): 0x37 has no mnemonic here — confirm whether it is
	// intentionally unsupported.
	"GASPRICE": 0x38,

	// 0x40 range - block operations
	"PREVHASH":   0x40,
	"COINBASE":   0x41,
	"TIMESTAMP":  0x42,
	"NUMBER":     0x43,
	"DIFFICULTY": 0x44,
	"GASLIMIT":   0x45,

	// 0x50 range - 'storage' and execution
	"PUSH":    0x50,
	"PUSH20":  0x80,
	"POP":     0x51,
	"DUP":     0x52,
	"SWAP":    0x53,
	"MLOAD":   0x54,
	"MSTORE":  0x55,
	"MSTORE8": 0x56,
	"SLOAD":   0x57,
	"SSTORE":  0x58,
	"JUMP":    0x59,
	"JUMPI":   0x5a,
	"PC":      0x5b,
	"MSIZE":   0x5c,

	// 0x60 range - closures
	"CREATE": 0x60,
	"CALL":   0x61,
	"RETURN": 0x62,

	// 0x70 range - other
	"LOG":     0x70,
	"SUICIDE": 0x7f,
}

// IsOpCode reports whether s is a known opcode mnemonic.
func IsOpCode(s string) bool {
	// Direct map lookup instead of scanning every key.
	_, ok := OpCodes[s]
	return ok
}
// CompileInstr compiles a single instruction into its byte representation:
// a known mnemonic becomes its one opcode byte; a numeric string or an int
// becomes a 256-bit (32-byte) big-endian value via BigToBytes; any other
// string is treated as raw bytes; a []byte is normalized through BigD.
// Unsupported types yield (nil, nil). The error result is currently always
// nil and exists for interface stability.
func CompileInstr(s interface{}) ([]byte, error) {
	// Single type switch with a bound variable replaces the original's
	// repeated type assertions.
	switch v := s.(type) {
	case string:
		if IsOpCode(v) {
			return []byte{OpCodes[v]}, nil
		}

		num := new(big.Int)
		if _, ok := num.SetString(v, 0); !ok {
			// Not numeric: assume regular bytes during compilation.
			num.SetBytes([]byte(v))
			return num.Bytes(), nil
		}
		// tmp fix for 32 bytes
		return BigToBytes(num, 256), nil
	case int:
		return BigToBytes(big.NewInt(int64(v)), 256), nil
	case []byte:
		return BigD(v).Bytes(), nil
	}

	return nil, nil
}
// Assemble compiles each instruction in order and concatenates the results
// into a single script. Compilation errors are deliberately ignored,
// matching historical behaviour.
func Assemble(instructions ...interface{}) (script []byte) {
	for _, instruction := range instructions {
		compiled, _ := CompileInstr(instruction)
		script = append(script, compiled...)
	}
	return
}
/*
Preprocessing function that takes init and main apart:
init() {
// something
}
main() {
// main something
}
func PreProcess(data string) (mainInput, initInput string) {
reg := "\\(\\)\\s*{([\\d\\w\\W\\n\\s]+?)}"
mainReg := regexp.MustCompile("main" + reg)
initReg := regexp.MustCompile("init" + reg)
main := mainReg.FindStringSubmatch(data)
if len(main) > 0 {
mainInput = main[1]
} else {
mainInput = data
}
init := initReg.FindStringSubmatch(data)
if len(init) > 0 {
initInput = init[1]
}
return
}
*/
// FindFor extracts the brace-delimited body that follows the first
// occurrence of blockMatcher in input (e.g. the body of "main () { ... }").
// Nested braces are balanced and the outermost pair is stripped; "" is
// returned when no block is found.
// Very, very dumb parser. Heed no attention :-)
func FindFor(blockMatcher, input string) string {
	curCount := -1 // -1: still searching for blockMatcher; >=0: brace depth
	length := len(blockMatcher)
	matchfst := rune(blockMatcher[0])
	var currStr string
	for i, run := range input {
		// Find the start of the block. The i+length bound guards the
		// slice: without it a matcher prefix at the very end of input
		// (e.g. input ending in "mai" for matcher "main") panicked.
		if curCount == -1 && run == matchfst && i+length <= len(input) && input[i:i+length] == blockMatcher {
			curCount = 0
		} else if curCount > -1 {
			if run == '{' {
				curCount++
				if curCount == 1 {
					continue // skip the opening brace itself
				}
			} else if run == '}' {
				curCount--
				if curCount == 0 {
					// Outermost brace closed: we are done.
					curCount = -1
					break
				}
			}
			if curCount > 0 {
				currStr += string(run)
			}
		}
	}

	return currStr
}

// PreProcess splits contract source into its main and init bodies. When no
// explicit main block exists, the whole input is treated as main; init
// defaults to "".
func PreProcess(data string) (mainInput, initInput string) {
	mainInput = FindFor("main", data)
	if mainInput == "" {
		mainInput = data
	}
	initInput = FindFor("init", data)

	return
}
Renamed CALLDATA to CALLDATALOAD
package ethutil
import (
_ "fmt"
"math/big"
_ "regexp"
)
// OpCodes maps assembler mnemonics to their EVM byte values.
var OpCodes = map[string]byte{
	// 0x0 range - arithmetic ops
	"STOP": 0x00,
	"ADD":  0x01,
	"MUL":  0x02,
	"SUB":  0x03,
	"DIV":  0x04,
	"SDIV": 0x05,
	"MOD":  0x06,
	"SMOD": 0x07,
	"EXP":  0x08,
	"NEG":  0x09,
	"LT":   0x0a,
	"GT":   0x0b,
	"EQ":   0x0c,
	"NOT":  0x0d,

	// 0x10 range - bit ops
	"AND":  0x10,
	"OR":   0x11,
	"XOR":  0x12,
	"BYTE": 0x13,

	// 0x20 range - crypto
	"SHA3": 0x20,

	// 0x30 range - closure state
	"ADDRESS":      0x30,
	"BALANCE":      0x31,
	"ORIGIN":       0x32,
	"CALLER":       0x33,
	"CALLVALUE":    0x34,
	"CALLDATALOAD": 0x35,
	"CALLDATASIZE": 0x36,
	// NOTE(review): 0x37 has no mnemonic here — confirm whether it is
	// intentionally unsupported.
	"GASPRICE": 0x38,

	// 0x40 range - block operations
	"PREVHASH":   0x40,
	"COINBASE":   0x41,
	"TIMESTAMP":  0x42,
	"NUMBER":     0x43,
	"DIFFICULTY": 0x44,
	"GASLIMIT":   0x45,

	// 0x50 range - 'storage' and execution
	"PUSH":    0x50,
	"PUSH20":  0x80,
	"POP":     0x51,
	"DUP":     0x52,
	"SWAP":    0x53,
	"MLOAD":   0x54,
	"MSTORE":  0x55,
	"MSTORE8": 0x56,
	"SLOAD":   0x57,
	"SSTORE":  0x58,
	"JUMP":    0x59,
	"JUMPI":   0x5a,
	"PC":      0x5b,
	"MSIZE":   0x5c,

	// 0x60 range - closures
	"CREATE": 0x60,
	"CALL":   0x61,
	"RETURN": 0x62,

	// 0x70 range - other
	"LOG":     0x70,
	"SUICIDE": 0x7f,
}

// IsOpCode reports whether s is a known opcode mnemonic.
func IsOpCode(s string) bool {
	// Direct map lookup instead of scanning every key.
	_, ok := OpCodes[s]
	return ok
}
// CompileInstr compiles a single instruction into its byte representation:
// a known mnemonic becomes its one opcode byte; a numeric string or an int
// becomes a 256-bit (32-byte) big-endian value via BigToBytes; any other
// string is treated as raw bytes; a []byte is normalized through BigD.
// Unsupported types yield (nil, nil). The error result is currently always
// nil and exists for interface stability.
func CompileInstr(s interface{}) ([]byte, error) {
	// Single type switch with a bound variable replaces the original's
	// repeated type assertions.
	switch v := s.(type) {
	case string:
		if IsOpCode(v) {
			return []byte{OpCodes[v]}, nil
		}

		num := new(big.Int)
		if _, ok := num.SetString(v, 0); !ok {
			// Not numeric: assume regular bytes during compilation.
			num.SetBytes([]byte(v))
			return num.Bytes(), nil
		}
		// tmp fix for 32 bytes
		return BigToBytes(num, 256), nil
	case int:
		return BigToBytes(big.NewInt(int64(v)), 256), nil
	case []byte:
		return BigD(v).Bytes(), nil
	}

	return nil, nil
}
// Assemble compiles each instruction in order and concatenates the results
// into a single script. Compilation errors are deliberately ignored,
// matching historical behaviour.
func Assemble(instructions ...interface{}) (script []byte) {
	for _, instruction := range instructions {
		compiled, _ := CompileInstr(instruction)
		script = append(script, compiled...)
	}
	return
}
/*
Preprocessing function that takes init and main apart:
init() {
// something
}
main() {
// main something
}
func PreProcess(data string) (mainInput, initInput string) {
reg := "\\(\\)\\s*{([\\d\\w\\W\\n\\s]+?)}"
mainReg := regexp.MustCompile("main" + reg)
initReg := regexp.MustCompile("init" + reg)
main := mainReg.FindStringSubmatch(data)
if len(main) > 0 {
mainInput = main[1]
} else {
mainInput = data
}
init := initReg.FindStringSubmatch(data)
if len(init) > 0 {
initInput = init[1]
}
return
}
*/
// FindFor extracts the brace-delimited body that follows the first
// occurrence of blockMatcher in input (e.g. the body of "main () { ... }").
// Nested braces are balanced and the outermost pair is stripped; "" is
// returned when no block is found.
// Very, very dumb parser. Heed no attention :-)
func FindFor(blockMatcher, input string) string {
	curCount := -1 // -1: still searching for blockMatcher; >=0: brace depth
	length := len(blockMatcher)
	matchfst := rune(blockMatcher[0])
	var currStr string
	for i, run := range input {
		// Find the start of the block. The i+length bound guards the
		// slice: without it a matcher prefix at the very end of input
		// (e.g. input ending in "mai" for matcher "main") panicked.
		if curCount == -1 && run == matchfst && i+length <= len(input) && input[i:i+length] == blockMatcher {
			curCount = 0
		} else if curCount > -1 {
			if run == '{' {
				curCount++
				if curCount == 1 {
					continue // skip the opening brace itself
				}
			} else if run == '}' {
				curCount--
				if curCount == 0 {
					// Outermost brace closed: we are done.
					curCount = -1
					break
				}
			}
			if curCount > 0 {
				currStr += string(run)
			}
		}
	}

	return currStr
}

// PreProcess splits contract source into its main and init bodies. When no
// explicit main block exists, the whole input is treated as main; init
// defaults to "".
func PreProcess(data string) (mainInput, initInput string) {
	mainInput = FindFor("main", data)
	if mainInput == "" {
		mainInput = data
	}
	initInput = FindFor("init", data)

	return
}
|
package garden_runner
import (
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/onsi/ginkgo/config"
"github.com/vito/cmdtest"
"github.com/vito/gordon"
)
// GardenRunner builds, starts, stops, and cleans up a garden server for
// integration tests, either locally or on a remote host via ssh.
type GardenRunner struct {
	Remote  string // ssh target; "" means run commands locally
	Network string // listener network, e.g. "unix"
	Addr    string // listener address (socket path for "unix")

	DepotPath     string // container depot directory (created by Prepare)
	RootPath      string // garden root containing scripts like linux/destroy.sh
	RootFSPath    string // root filesystem handed to the server via --rootfs
	SnapshotsPath string // snapshot directory (created by Prepare)

	gardenBin string    // path to the garden binary (set by Prepare)
	gardenCmd *exec.Cmd // running server process; nil when stopped

	tmpdir string // per-run scratch directory on the target host
}
// New constructs a GardenRunner rooted at rootPath/rootFSPath, listening
// on a unix socket in a fresh temp directory, optionally targeting a
// remote host, and immediately runs Prepare on it.
func New(rootPath, rootFSPath, remote string) (*GardenRunner, error) {
	tmpdir, err := ioutil.TempDir(os.TempDir(), "garden-temp-socker")
	if err != nil {
		return nil, err
	}

	r := &GardenRunner{
		Remote:     remote,
		Network:    "unix",
		Addr:       filepath.Join(tmpdir, "warden.sock"),
		RootPath:   rootPath,
		RootFSPath: rootFSPath,
	}

	return r, r.Prepare()
}
// cmd builds an *exec.Cmd for command+argv: executed directly when running
// locally, or wrapped in an ssh invocation as root when r.Remote is set.
func (r *GardenRunner) cmd(command string, argv ...string) *exec.Cmd {
	// Early return replaces the original's else-after-return.
	if r.Remote == "" {
		return exec.Command(command, argv...)
	}

	// -tt forces a remote tty; together with huponexit the remote shell
	// propagates hangup to the command when the ssh session ends.
	args := []string{
		"-tt", "-l", "root", r.Remote,
		"shopt -s huponexit; " + command,
	}
	args = append(args, argv...)

	return exec.Command("ssh", args...)
}
// Prepare creates the runner's scratch directories on the target host and
// obtains a garden binary: compiled via cmdtest when running locally, or
// built through /vagrant/bin/integration/build when targeting a remote
// host. Must succeed before Start is called.
func (r *GardenRunner) Prepare() error {
	// Unique per run and per ginkgo parallel node so concurrent test
	// processes do not collide.
	r.tmpdir = fmt.Sprintf("/tmp/garden-%d-%d", time.Now().UnixNano(), config.GinkgoConfig.ParallelNode)
	err := r.cmd("mkdir", r.tmpdir).Run()
	if err != nil {
		return err
	}

	if r.Remote == "" {
		compiled, err := cmdtest.Build("github.com/pivotal-cf-experimental/garden")
		if err != nil {
			return err
		}

		r.gardenBin = compiled
	} else {
		// Remote build output is streamed to this process's stdio.
		buildCmd := r.cmd("/vagrant/bin/integration/build")
		buildCmd.Stdout = os.Stdout
		buildCmd.Stderr = os.Stderr

		err = buildCmd.Run()
		if err != nil {
			return err
		}

		r.gardenBin = "/vagrant/bin/integration/out/garden"
	}

	r.DepotPath = filepath.Join(r.tmpdir, "containers")
	// The depot is created with explicit 0755 permissions.
	err = r.cmd("mkdir", "-m", "0755", r.DepotPath).Run()
	if err != nil {
		return err
	}

	r.SnapshotsPath = filepath.Join(r.tmpdir, "snapshots")
	return r.cmd("mkdir", r.SnapshotsPath).Run()
}
// Start launches the garden server with this runner's configured paths
// plus any extra argv, then blocks until the listen address accepts a
// connection or 10 seconds elapse. On timeout the background poller is
// told to stop and an error is returned; the process itself keeps running
// (r.gardenCmd stays set so Stop can still signal it).
func (r *GardenRunner) Start(argv ...string) error {
	gardenArgs := argv
	gardenArgs = append(
		gardenArgs,
		"--listenNetwork", r.Network,
		"--listenAddr", r.Addr,
		"--root", r.RootPath,
		"--depot", r.DepotPath,
		"--rootfs", r.RootFSPath,
		"--snapshots", r.SnapshotsPath,
		"--debug",
		"--disableQuotas",
	)

	garden := r.cmd(r.gardenBin, gardenArgs...)
	garden.Stdout = os.Stdout
	garden.Stderr = os.Stderr

	err := garden.Start()
	if err != nil {
		return err
	}

	// Buffered so the poller can signal and exit without a receiver.
	started := make(chan bool, 1)
	stop := make(chan bool, 1)

	// Poll the listen address in the background until it accepts.
	go r.waitForStart(started, stop)

	timeout := 10 * time.Second

	// Record the command before waiting so Stop works even after timeout.
	r.gardenCmd = garden

	select {
	case <-started:
		return nil
	case <-time.After(timeout):
		stop <- true
		return fmt.Errorf("garden did not come up within %s", timeout)
	}
}
// Stop sends SIGINT to the running garden process and waits up to 10
// seconds for its listener to stop accepting connections. It is a no-op
// when no server was started. gardenCmd is cleared only on confirmed
// shutdown, so a timed-out Stop can be retried.
func (r *GardenRunner) Stop() error {
	if r.gardenCmd == nil {
		return nil
	}

	err := r.gardenCmd.Process.Signal(os.Interrupt)
	if err != nil {
		return err
	}

	// Buffered so the poller can signal and exit without a receiver.
	stopped := make(chan bool, 1)
	stop := make(chan bool, 1)

	// Poll in the background until dialing the address starts failing.
	go r.waitForStop(stopped, stop)

	timeout := 10 * time.Second

	select {
	case <-stopped:
		r.gardenCmd = nil
		return nil
	case <-time.After(timeout):
		stop <- true
		return fmt.Errorf("garden did not shut down within %s", timeout)
	}
}
// DestroyContainers runs the garden destroy script against every entry in
// the depot directory, then wipes the snapshots directory. Depot entries
// are listed NUL-separated via find so arbitrary file names survive
// splitting.
func (r *GardenRunner) DestroyContainers() error {
	lsOutput, err := r.cmd("find", r.DepotPath, "-maxdepth", "1", "-mindepth", "1", "-print0").Output() // ls does not use linebreaks
	if err != nil {
		return err
	}

	containerDirs := strings.Split(string(lsOutput), "\x00")

	for _, dir := range containerDirs {
		// find's trailing NUL produces one empty element; skip it.
		if dir == "" {
			continue
		}

		err := r.cmd(
			filepath.Join(r.RootPath, "linux", "destroy.sh"),
			dir,
		).Run()
		if err != nil {
			return err
		}
	}

	return r.cmd("rm", "-rf", r.SnapshotsPath).Run()
}
// TearDown destroys all containers and then removes the runner's scratch
// directory.
func (r *GardenRunner) TearDown() error {
	err := r.DestroyContainers()
	if err != nil {
		return err
	}
	return r.cmd("rm", "-rf", r.tmpdir).Run()
}

// NewClient returns a gordon client configured to talk to this runner's
// garden server address.
func (r *GardenRunner) NewClient() gordon.Client {
	return gordon.NewClient(&gordon.ConnectionInfo{
		Network: r.Network,
		Addr:    r.Addr,
	})
}
// waitForStart polls the garden listen address every 100ms until a dial
// succeeds, then signals started. A value on stop aborts the loop.
func (r *GardenRunner) waitForStart(started chan<- bool, stop <-chan bool) {
	for {
		conn, dialErr := net.Dial(r.Network, r.Addr)
		if dialErr == nil {
			// Server is accepting connections; we only needed the probe.
			conn.Close()
			started <- true
			return
		}
		select {
		case <-stop:
			return
		case <-time.After(100 * time.Millisecond):
		}
	}
}

// waitForStop polls the garden listen address every 100ms until dialing
// fails, then signals stopped. A value on stop aborts the loop.
func (r *GardenRunner) waitForStop(stopped chan<- bool, stop <-chan bool) {
	for {
		conn, dialErr := net.Dial(r.Network, r.Addr)
		if dialErr != nil {
			// Server no longer accepting connections: it has shut down.
			stopped <- true
			return
		}
		conn.Close()
		select {
		case <-stop:
			return
		case <-time.After(100 * time.Millisecond):
		}
	}
}
clean up garden runner
package garden_runner
import (
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/onsi/ginkgo/config"
"github.com/vito/cmdtest"
"github.com/vito/gordon"
)
// GardenRunner builds, starts and tears down a garden server for
// integration tests, either locally or on a remote host over SSH.
type GardenRunner struct {
	Remote  string // SSH host to run commands on; "" means run locally
	Network string // listen network, e.g. "unix"
	Addr    string // listen address (socket path for "unix")

	DepotPath     string // container depot directory under tmpdir
	RootPath      string // garden root (holds linux/destroy.sh etc.)
	RootFSPath    string // root filesystem handed to containers
	SnapshotsPath string // snapshots directory under tmpdir

	gardenBin string    // path of the compiled garden binary
	gardenCmd *exec.Cmd // running server process; nil when not running

	tmpdir string // per-node scratch directory created by Prepare
}
// New creates a GardenRunner rooted at rootPath/rootFSPath, listening on
// a unix socket in a fresh temp directory, and immediately runs Prepare.
// remote may name an SSH host; "" runs everything locally.
func New(rootPath, rootFSPath, remote string) (*GardenRunner, error) {
	tmpdir, err := ioutil.TempDir(os.TempDir(), "garden-temp-socker")
	if err != nil {
		return nil, err
	}
	runner := &GardenRunner{
		Remote:     remote,
		Network:    "unix",
		Addr:       filepath.Join(tmpdir, "warden.sock"),
		RootPath:   rootPath,
		RootFSPath: rootFSPath,
	}
	return runner, runner.Prepare()
}
// cmd builds an *exec.Cmd for command/argv — either directly, or wrapped
// in an ssh invocation when the runner targets a remote host.
func (r *GardenRunner) cmd(command string, argv ...string) *exec.Cmd {
	if r.Remote == "" {
		return exec.Command(command, argv...)
	}
	// huponexit makes the remote shell HUP its children when the ssh
	// session (-tt forces a tty) goes away.
	sshArgs := append([]string{
		"-tt", "-l", "root", r.Remote,
		"shopt -s huponexit; " + command,
	}, argv...)
	return exec.Command("ssh", sshArgs...)
}
// Prepare creates the per-node scratch directory, compiles the garden
// binary with cmdtest, and creates the depot and snapshots directories.
func (r *GardenRunner) Prepare() error {
	// ParallelNode keeps concurrent ginkgo nodes from sharing a tmpdir.
	r.tmpdir = fmt.Sprintf("/tmp/garden-%d-%d", time.Now().UnixNano(), config.GinkgoConfig.ParallelNode)
	err := r.cmd("mkdir", r.tmpdir).Run()
	if err != nil {
		return err
	}
	compiled, err := cmdtest.Build("github.com/pivotal-cf-experimental/garden")
	if err != nil {
		return err
	}
	r.gardenBin = compiled
	r.DepotPath = filepath.Join(r.tmpdir, "containers")
	err = r.cmd("mkdir", "-m", "0755", r.DepotPath).Run()
	if err != nil {
		return err
	}
	r.SnapshotsPath = filepath.Join(r.tmpdir, "snapshots")
	return r.cmd("mkdir", r.SnapshotsPath).Run()
}
// Start launches the garden server with the runner's standard flags plus
// any extra argv, then waits up to 10s for its listen address to accept
// connections. The process handle is kept in r.gardenCmd for Stop.
func (r *GardenRunner) Start(argv ...string) error {
	gardenArgs := argv
	gardenArgs = append(
		gardenArgs,
		"--listenNetwork", r.Network,
		"--listenAddr", r.Addr,
		"--root", r.RootPath,
		"--depot", r.DepotPath,
		"--rootfs", r.RootFSPath,
		"--snapshots", r.SnapshotsPath,
		"--debug",
		"--disableQuotas",
	)
	garden := r.cmd(r.gardenBin, gardenArgs...)
	garden.Stdout = os.Stdout
	garden.Stderr = os.Stderr
	err := garden.Start()
	if err != nil {
		return err
	}
	started := make(chan bool, 1)
	stop := make(chan bool, 1)
	go r.waitForStart(started, stop)
	timeout := 10 * time.Second
	r.gardenCmd = garden
	select {
	case <-started:
		return nil
	case <-time.After(timeout):
		// Tell the poller goroutine to give up; channel is buffered so
		// this send never blocks.
		stop <- true
		return fmt.Errorf("garden did not come up within %s", timeout)
	}
}
// Stop sends SIGINT to the running garden server and waits up to 10s for
// its listen address to stop accepting connections. A nil gardenCmd
// (never started, or already stopped) is a no-op.
func (r *GardenRunner) Stop() error {
	if r.gardenCmd == nil {
		return nil
	}
	err := r.gardenCmd.Process.Signal(os.Interrupt)
	if err != nil {
		return err
	}
	stopped := make(chan bool, 1)
	stop := make(chan bool, 1)
	go r.waitForStop(stopped, stop)
	timeout := 10 * time.Second
	select {
	case <-stopped:
		// Mark as stopped so a second Stop is a no-op.
		r.gardenCmd = nil
		return nil
	case <-time.After(timeout):
		stop <- true
		return fmt.Errorf("garden did not shut down within %s", timeout)
	}
}
// DestroyContainers runs linux/destroy.sh on every container directory in
// the depot, then removes the snapshots directory.
func (r *GardenRunner) DestroyContainers() error {
	// -print0 NUL-separates the entries, so the split below is safe for
	// paths containing whitespace.
	lsOutput, err := r.cmd("find", r.DepotPath, "-maxdepth", "1", "-mindepth", "1", "-print0").Output() // ls does not use linebreaks
	if err != nil {
		return err
	}
	containerDirs := strings.Split(string(lsOutput), "\x00")
	for _, dir := range containerDirs {
		if dir == "" {
			// The trailing NUL produces one empty element.
			continue
		}
		err := r.cmd(
			filepath.Join(r.RootPath, "linux", "destroy.sh"),
			dir,
		).Run()
		if err != nil {
			return err
		}
	}
	return r.cmd("rm", "-rf", r.SnapshotsPath).Run()
}
// TearDown destroys all containers and then removes the runner's scratch
// directory.
func (r *GardenRunner) TearDown() error {
	err := r.DestroyContainers()
	if err != nil {
		return err
	}
	return r.cmd("rm", "-rf", r.tmpdir).Run()
}

// NewClient returns a gordon client configured to talk to this runner's
// garden server address.
func (r *GardenRunner) NewClient() gordon.Client {
	return gordon.NewClient(&gordon.ConnectionInfo{
		Network: r.Network,
		Addr:    r.Addr,
	})
}
// waitForStart polls the garden listen address every 100ms until a dial
// succeeds, then signals started. A value on stop aborts the loop.
func (r *GardenRunner) waitForStart(started chan<- bool, stop <-chan bool) {
	for {
		conn, dialErr := net.Dial(r.Network, r.Addr)
		if dialErr == nil {
			// Server is accepting connections; we only needed the probe.
			conn.Close()
			started <- true
			return
		}
		select {
		case <-stop:
			return
		case <-time.After(100 * time.Millisecond):
		}
	}
}

// waitForStop polls the garden listen address every 100ms until dialing
// fails, then signals stopped. A value on stop aborts the loop.
func (r *GardenRunner) waitForStop(stopped chan<- bool, stop <-chan bool) {
	for {
		conn, dialErr := net.Dial(r.Network, r.Addr)
		if dialErr != nil {
			// Server no longer accepting connections: it has shut down.
			stopped <- true
			return
		}
		conn.Close()
		select {
		case <-stop:
			return
		case <-time.After(100 * time.Millisecond):
		}
	}
}
|
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package integration
import (
"testing"
"time"
"github.com/u-root/u-root/pkg/qemu"
"github.com/u-root/u-root/pkg/testutil"
"github.com/u-root/u-root/pkg/uroot"
"github.com/u-root/u-root/pkg/vmtest"
)
// TestDhclient boots a server VM (static 192.168.0.1, running pxeserver)
// and a client VM running dhclient on a shared QEMU network, and expects
// the client to be configured with the first lease, 192.168.0.2.
func TestDhclient(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	network := qemu.NewNetwork()
	_, scleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestDhclient_Server",
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "server"),
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
		TestCmds: []string{
			"ip link set eth0 up",
			"ip addr add 192.168.0.1/24 dev eth0",
			"ip route add 0.0.0.0/0 dev eth0",
			"pxeserver",
		},
	})
	defer scleanup()
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestDhclient_Client",
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
		TestCmds: []string{
			"dhclient -ipv6=false -v",
			"ip a",
			// Sleep so serial console output gets flushed. The expect library is racy.
			"sleep 5",
			"shutdown -h",
		},
	})
	defer ccleanup()
	if err := dhcpClient.Expect("Configured eth0 with IPv4 DHCP Lease"); err != nil {
		t.Error(err)
	}
	if err := dhcpClient.Expect("inet 192.168.0.2"); err != nil {
		t.Error(err)
	}
}
// TestPxeboot runs a server and client to test pxebooting a node.
// TODO: FIX THIS TEST!
// Change the t.Logf below back to t.Errorf
func TestPxeboot(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	network := qemu.NewNetwork()
	dhcpServer, scleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestPxeboot_Server",
		BuildOpts: uroot.Opts{
			// Ship the test PXE tree into the server image as /pxeroot.
			ExtraFiles: []string{
				"testdata/pxe:pxeroot",
			},
		},
		TestCmds: []string{
			"ip addr add 192.168.0.1/24 dev eth0",
			"ip link set eth0 up",
			"ip route add 0.0.0.0/0 dev eth0",
			"ls -l /pxeroot",
			"pxeserver -tftp-dir=/pxeroot",
		},
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "server"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
	})
	defer scleanup()
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestPxeboot_Client",
		BuildOpts: uroot.Opts{
			// Specify commands to include because generic initramfs
			// does not include cmds/boot.
			Commands: uroot.BusyBoxCmds(
				"github.com/u-root/u-root/cmds/core/init",
				"github.com/u-root/u-root/cmds/core/elvish",
				"github.com/u-root/u-root/cmds/core/ip",
				"github.com/u-root/u-root/cmds/core/shutdown",
				"github.com/u-root/u-root/cmds/core/sleep",
				"github.com/u-root/u-root/cmds/boot/pxeboot",
			),
		},
		TestCmds: []string{
			"pxeboot --dry-run --no-load -v",
			// Sleep so serial console output gets flushed. The expect library is racy.
			"sleep 5",
			"shutdown -h",
		},
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
	})
	defer ccleanup()
	// Expectations only log instead of failing; see the FIX THIS TEST
	// note at the top of this function.
	if err := dhcpServer.Expect("starting file server"); err != nil {
		t.Logf("File server: %v", err)
	}
	if err := dhcpClient.Expect("Got DHCPv4 lease on eth0:"); err != nil {
		t.Logf("Lease %v:", err)
	}
	if err := dhcpClient.Expect("Boot URI: tftp://192.168.0.1/pxelinux.0"); err != nil {
		t.Logf("Boot: %v", err)
	}
}
// TestDhclientQEMU4 uses QEMU's built-in user-mode DHCP server
// (dhcpstart=192.168.0.10, IPv6 off) and expects dhclient to pick up
// that address on eth0.
func TestDhclientQEMU4(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				qemu.ArbitraryArgs{
					"-device", "e1000,netdev=host0",
					"-netdev", "user,id=host0,net=192.168.0.0/24,dhcpstart=192.168.0.10,ipv6=off",
				},
			},
		},
		Uinit: []string{
			"dhclient -ipv6=false -v",
			"ip a",
			// Sleep so serial console output gets flushed.
			"sleep 5",
			"shutdown -h",
		},
	})
	defer ccleanup()
	if err := dhcpClient.Expect("Configured eth0 with IPv4 DHCP Lease"); err != nil {
		t.Errorf("%s: %v", testutil.NowLog(), err)
	}
	if err := dhcpClient.Expect("inet 192.168.0.10"); err != nil {
		t.Errorf("%s: %v", testutil.NowLog(), err)
	}
}
// TestQEMUDHCPTimesOut checks that dhclient on the loopback interface
// gives up after its retry/timeout budget instead of hanging forever.
func TestQEMUDHCPTimesOut(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestQEMUDHCPTimesOut",
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      40 * time.Second,
		},
		TestCmds: []string{
			// loopback should time out and it can't have configured anything.
			"dhclient -v -retry 1 -timeout 10 lo",
			"echo \"DHCP timed out\"",
			// Sleep so serial console output gets flushed. The expect library is racy.
			"sleep 5",
			"shutdown -h",
		},
	})
	defer ccleanup()
	// Make sure that dhclient does not hang forever.
	if err := dhcpClient.Expect("DHCP timed out"); err != nil {
		t.Error(err)
	}
}
dhclient: DHCP times out test works with v6
Signed-off-by: Chris Koch <cd2178739c1fb8f241acc6c7bd5abe02c150d7e5@google.com>
// Copyright 2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package integration
import (
"testing"
"time"
"github.com/u-root/u-root/pkg/qemu"
"github.com/u-root/u-root/pkg/testutil"
"github.com/u-root/u-root/pkg/uroot"
"github.com/u-root/u-root/pkg/vmtest"
)
// TestDhclient boots a server VM (static 192.168.0.1, running pxeserver)
// and a client VM running dhclient on a shared QEMU network, and expects
// the client to be configured with the first lease, 192.168.0.2.
func TestDhclient(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	network := qemu.NewNetwork()
	_, scleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestDhclient_Server",
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "server"),
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
		TestCmds: []string{
			"ip link set eth0 up",
			"ip addr add 192.168.0.1/24 dev eth0",
			"ip route add 0.0.0.0/0 dev eth0",
			"pxeserver",
		},
	})
	defer scleanup()
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestDhclient_Client",
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
		TestCmds: []string{
			"dhclient -ipv6=false -v",
			"ip a",
			// Sleep so serial console output gets flushed. The expect library is racy.
			"sleep 5",
			"shutdown -h",
		},
	})
	defer ccleanup()
	if err := dhcpClient.Expect("Configured eth0 with IPv4 DHCP Lease"); err != nil {
		t.Error(err)
	}
	if err := dhcpClient.Expect("inet 192.168.0.2"); err != nil {
		t.Error(err)
	}
}
// TestPxeboot runs a server and client to test pxebooting a node.
// TODO: FIX THIS TEST!
// Change the t.Logf below back to t.Errorf
func TestPxeboot(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	network := qemu.NewNetwork()
	dhcpServer, scleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestPxeboot_Server",
		BuildOpts: uroot.Opts{
			// Ship the test PXE tree into the server image as /pxeroot.
			ExtraFiles: []string{
				"testdata/pxe:pxeroot",
			},
		},
		TestCmds: []string{
			"ip addr add 192.168.0.1/24 dev eth0",
			"ip link set eth0 up",
			"ip route add 0.0.0.0/0 dev eth0",
			"ls -l /pxeroot",
			"pxeserver -tftp-dir=/pxeroot",
		},
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "server"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
	})
	defer scleanup()
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestPxeboot_Client",
		BuildOpts: uroot.Opts{
			// Specify commands to include because generic initramfs
			// does not include cmds/boot.
			Commands: uroot.BusyBoxCmds(
				"github.com/u-root/u-root/cmds/core/init",
				"github.com/u-root/u-root/cmds/core/elvish",
				"github.com/u-root/u-root/cmds/core/ip",
				"github.com/u-root/u-root/cmds/core/shutdown",
				"github.com/u-root/u-root/cmds/core/sleep",
				"github.com/u-root/u-root/cmds/boot/pxeboot",
			),
		},
		TestCmds: []string{
			"pxeboot --dry-run --no-load -v",
			// Sleep so serial console output gets flushed. The expect library is racy.
			"sleep 5",
			"shutdown -h",
		},
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
	})
	defer ccleanup()
	// Expectations only log instead of failing; see the FIX THIS TEST
	// note at the top of this function.
	if err := dhcpServer.Expect("starting file server"); err != nil {
		t.Logf("File server: %v", err)
	}
	if err := dhcpClient.Expect("Got DHCPv4 lease on eth0:"); err != nil {
		t.Logf("Lease %v:", err)
	}
	if err := dhcpClient.Expect("Boot URI: tftp://192.168.0.1/pxelinux.0"); err != nil {
		t.Logf("Boot: %v", err)
	}
}
// TestDhclientQEMU4 uses QEMU's built-in user-mode DHCP server
// (dhcpstart=192.168.0.10, IPv6 off) and expects dhclient to pick up
// that address on eth0.
func TestDhclientQEMU4(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		QEMUOpts: qemu.Options{
			SerialOutput: vmtest.TestLineWriter(t, "client"),
			Timeout:      30 * time.Second,
			Devices: []qemu.Device{
				qemu.ArbitraryArgs{
					"-device", "e1000,netdev=host0",
					"-netdev", "user,id=host0,net=192.168.0.0/24,dhcpstart=192.168.0.10,ipv6=off",
				},
			},
		},
		Uinit: []string{
			"dhclient -ipv6=false -v",
			"ip a",
			// Sleep so serial console output gets flushed.
			"sleep 5",
			"shutdown -h",
		},
	})
	defer ccleanup()
	if err := dhcpClient.Expect("Configured eth0 with IPv4 DHCP Lease"); err != nil {
		t.Errorf("%s: %v", testutil.NowLog(), err)
	}
	if err := dhcpClient.Expect("inet 192.168.0.10"); err != nil {
		t.Errorf("%s: %v", testutil.NowLog(), err)
	}
}
// TestQEMUDHCPTimesOut runs dhclient on a network with no DHCP server and
// expects one failure line per IP family followed by the shell's
// "DHCP timed out" marker, proving dhclient terminates.
func TestQEMUDHCPTimesOut(t *testing.T) {
	// TODO: support arm
	if vmtest.TestArch() != "amd64" {
		t.Skipf("test not supported on %s", vmtest.TestArch())
	}
	network := qemu.NewNetwork()
	dhcpClient, ccleanup := vmtest.QEMUTest(t, &vmtest.Options{
		Name: "TestQEMUDHCPTimesOut",
		QEMUOpts: qemu.Options{
			Timeout: 50 * time.Second,
			Devices: []qemu.Device{
				network.NewVM(),
			},
		},
		Uinit: []string{
			"dhclient -v -retry 2 -timeout 10",
			"echo \"DHCP timed out\"",
			"shutdown -h",
		},
	})
	defer ccleanup()
	// One "Could not configure" line each for IPv4 and IPv6.
	if err := dhcpClient.Expect("Could not configure eth0 for IPv"); err != nil {
		t.Error(err)
	}
	if err := dhcpClient.Expect("Could not configure eth0 for IPv"); err != nil {
		t.Error(err)
	}
	// Make sure dhclient did not hang forever.
	if err := dhcpClient.Expect("DHCP timed out"); err != nil {
		t.Error(err)
	}
}
|
package eval
// Builtin functions.
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unicode/utf8"
"github.com/elves/elvish/parse"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
// builtinFns holds every builtin; init copies them into builtinNamespace.
var builtinFns []*BuiltinFn

// BuiltinFn is a builtin function.
type BuiltinFn struct {
	Name string
	// Impl receives the evaluation context, positional arguments and
	// options; failures are reported by throwing, not by return value.
	Impl func(*EvalCtx, []Value, map[string]Value)
}

// Compile-time check that *BuiltinFn satisfies FnValue.
var _ FnValue = &BuiltinFn{}

// Kind returns the value kind of a builtin function.
func (*BuiltinFn) Kind() string {
	return "fn"
}

// Repr returns a representation like "<builtin put>".
func (b *BuiltinFn) Repr(int) string {
	return "<builtin " + b.Name + ">"
}

// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value, opts map[string]Value) {
	b.Impl(ec, args, opts)
}
// init registers every builtin function into builtinNamespace and seeds
// the RNG used by rand/randint.
func init() {
	// Needed to work around init loop.
	builtinFns = []*BuiltinFn{
		// Fundamental predicates
		&BuiltinFn{"true", nop},
		&BuiltinFn{"false", falseFn},

		// Introspection
		&BuiltinFn{"kind-of", kindOf},

		// Value output
		&BuiltinFn{"put", put},
		&BuiltinFn{"unpack", WrapFn(unpack)},

		// Bytes output
		&BuiltinFn{"print", WrapFn(print, OptSpec{"sep", String(" ")})},
		&BuiltinFn{"echo", WrapFn(echo, OptSpec{"sep", String(" ")})},
		&BuiltinFn{"pprint", pprint},

		// Bytes to value
		&BuiltinFn{"slurp", WrapFn(slurp)},
		&BuiltinFn{"from-lines", WrapFn(fromLines)},
		&BuiltinFn{"from-json", WrapFn(fromJSON)},

		// Value to bytes
		&BuiltinFn{"to-lines", WrapFn(toLines)},
		&BuiltinFn{"to-json", WrapFn(toJSON)},

		// String
		&BuiltinFn{"joins", WrapFn(joins)},
		&BuiltinFn{"splits", WrapFn(splits, OptSpec{"sep", String("")})},
		&BuiltinFn{"has-prefix", WrapFn(hasPrefix)},
		&BuiltinFn{"has-suffix", WrapFn(hasSuffix)},

		// String comparison
		&BuiltinFn{"<s",
			wrapStrCompare(func(a, b string) bool { return a < b })},
		&BuiltinFn{"<=s",
			wrapStrCompare(func(a, b string) bool { return a <= b })},
		&BuiltinFn{"==s",
			wrapStrCompare(func(a, b string) bool { return a == b })},
		&BuiltinFn{"!=s",
			wrapStrCompare(func(a, b string) bool { return a != b })},
		&BuiltinFn{">s",
			wrapStrCompare(func(a, b string) bool { return a > b })},
		&BuiltinFn{">=s",
			wrapStrCompare(func(a, b string) bool { return a >= b })},

		// Exception and control
		&BuiltinFn{"fail", WrapFn(fail)},
		&BuiltinFn{"multi-error", WrapFn(multiErrorFn)},
		&BuiltinFn{"return", WrapFn(returnFn)},
		&BuiltinFn{"break", WrapFn(breakFn)},
		&BuiltinFn{"continue", WrapFn(continueFn)},

		// Functional primitives
		&BuiltinFn{"constantly", constantly},
		&BuiltinFn{"each", WrapFn(each)},
		&BuiltinFn{"peach", WrapFn(peach)},

		// Sequence primitives
		&BuiltinFn{"take", WrapFn(take)},
		&BuiltinFn{"range", rangeFn},
		&BuiltinFn{"count", count},

		// eawk
		&BuiltinFn{"eawk", WrapFn(eawk)},

		// Directory
		&BuiltinFn{"cd", cd},
		&BuiltinFn{"dirs", WrapFn(dirs)},

		// Path
		&BuiltinFn{"path-abs", wrapStringToStringError(filepath.Abs)},
		&BuiltinFn{"path-base", wrapStringToString(filepath.Base)},
		&BuiltinFn{"path-clean", wrapStringToString(filepath.Clean)},
		&BuiltinFn{"path-dir", wrapStringToString(filepath.Dir)},
		&BuiltinFn{"path-ext", wrapStringToString(filepath.Ext)},
		&BuiltinFn{"eval-symlinks", wrapStringToStringError(filepath.EvalSymlinks)},
		&BuiltinFn{"tilde-abbr", WrapFn(tildeAbbr)},
		&BuiltinFn{"source", WrapFn(source)},

		// Arithmetics
		&BuiltinFn{"+", WrapFn(plus)},
		&BuiltinFn{"-", WrapFn(minus)},
		&BuiltinFn{"*", WrapFn(times)},
		&BuiltinFn{"/", slash},
		&BuiltinFn{"^", WrapFn(pow)},
		&BuiltinFn{"%", WrapFn(mod)},

		// Random
		&BuiltinFn{"rand", WrapFn(randFn)},
		&BuiltinFn{"randint", WrapFn(randint)},

		// Numerical comparison
		&BuiltinFn{"<",
			wrapNumCompare(func(a, b float64) bool { return a < b })},
		&BuiltinFn{"<=",
			wrapNumCompare(func(a, b float64) bool { return a <= b })},
		&BuiltinFn{"==",
			wrapNumCompare(func(a, b float64) bool { return a == b })},
		&BuiltinFn{"!=",
			wrapNumCompare(func(a, b float64) bool { return a != b })},
		&BuiltinFn{">",
			wrapNumCompare(func(a, b float64) bool { return a > b })},
		&BuiltinFn{">=",
			wrapNumCompare(func(a, b float64) bool { return a >= b })},

		// Generic identity and equality
		&BuiltinFn{"is", is},
		&BuiltinFn{"eq", eq},

		// String operations
		&BuiltinFn{"ord", WrapFn(ord)},
		&BuiltinFn{"base", WrapFn(base)},
		&BuiltinFn{"-override-wcwidth", WrapFn(overrideWcwidth)},
		&BuiltinFn{"wcswidth", WrapFn(wcswidth)},
		&BuiltinFn{"resolve", WrapFn(resolveFn)},

		// bool
		&BuiltinFn{"bool", WrapFn(boolFn)},

		// File and pipe
		&BuiltinFn{"fopen", WrapFn(fopen)},
		&BuiltinFn{"fclose", WrapFn(fclose)},
		&BuiltinFn{"pipe", WrapFn(pipe)},
		&BuiltinFn{"prclose", WrapFn(prclose)},
		&BuiltinFn{"pwclose", WrapFn(pwclose)},
		&BuiltinFn{"esleep", WrapFn(sleep)},
		&BuiltinFn{"fg", WrapFn(fg)},
		&BuiltinFn{"exec", WrapFn(exec)},
		&BuiltinFn{"exit", WrapFn(exit)},

		// Debugging aids
		&BuiltinFn{"-stack", WrapFn(_stack)},
		&BuiltinFn{"-log", WrapFn(_log)},
		&BuiltinFn{"-time", WrapFn(_time)},
	}
	for _, b := range builtinFns {
		builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
	}
	// For rand and randint.
	rand.Seed(time.Now().UTC().UnixNano())
}
// Common error values thrown by builtins.
var (
	ErrArgs              = errors.New("args error")
	ErrInput             = errors.New("input error")
	ErrStoreNotConnected = errors.New("store not connected")
	ErrNoMatchingDir     = errors.New("no matching directory")
	ErrNotInSameGroup    = errors.New("not in the same process group")
	ErrInterrupted       = errors.New("interrupted")
)

// Reflect types used by WrapFn for argument checking and conversion.
var (
	evalCtxType     = reflect.TypeOf((*EvalCtx)(nil))
	valueType       = reflect.TypeOf((*Value)(nil)).Elem()
	iterateType     = reflect.TypeOf((func(func(Value)))(nil))
	stringValueType = reflect.TypeOf(String(""))
)
// WrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature of
// the inner function and option specifications. The inner function must accept
// EvalCtx* as the first argument, followed by options, followed by arguments.
func WrapFn(inner interface{}, optSpecs ...OptSpec) func(*EvalCtx, []Value, map[string]Value) {
	funcType := reflect.TypeOf(inner)
	if funcType.In(0) != evalCtxType {
		panic("bad func to wrap, first argument not *EvalCtx")
	}
	nopts := len(optSpecs)
	// Options occupy inner-argument slots [1, optsTo).
	optsTo := nopts + 1
	optSet := NewOptSet(optSpecs...)
	// Range occupied by fixed arguments in the argument list to inner.
	fixedArgsFrom, fixedArgsTo := optsTo, funcType.NumIn()
	isVariadic := funcType.IsVariadic()
	hasOptionalIterate := false
	var variadicType reflect.Type
	if isVariadic {
		fixedArgsTo--
		variadicType = funcType.In(funcType.NumIn() - 1).Elem()
		if !supportedArgType(variadicType) {
			panic(fmt.Sprintf("bad func to wrap, variadic argument type %s unsupported", variadicType))
		}
	} else if funcType.In(funcType.NumIn()-1) == iterateType {
		// A trailing func(func(Value)) parameter is filled either from an
		// explicit Iterator argument or from the input stream.
		fixedArgsTo--
		hasOptionalIterate = true
	}
	for i := 1; i < fixedArgsTo; i++ {
		if !supportedArgType(funcType.In(i)) {
			panic(fmt.Sprintf("bad func to wrap, argument type %s unsupported", funcType.In(i)))
		}
	}
	nFixedArgs := fixedArgsTo - fixedArgsFrom
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		// Check arity of arguments.
		if isVariadic {
			if len(args) < nFixedArgs {
				throw(fmt.Errorf("arity mismatch: want %d or more arguments, got %d", nFixedArgs, len(args)))
			}
		} else if hasOptionalIterate {
			if len(args) < nFixedArgs || len(args) > nFixedArgs+1 {
				throw(fmt.Errorf("arity mismatch: want %d or %d arguments, got %d", nFixedArgs, nFixedArgs+1, len(args)))
			}
		} else if len(args) != nFixedArgs {
			throw(fmt.Errorf("arity mismatch: want %d arguments, got %d", nFixedArgs, len(args)))
		}
		convertedArgs := make([]reflect.Value, 1+nopts+len(args))
		convertedArgs[0] = reflect.ValueOf(ec)
		// Convert and fill options.
		var err error
		optValues := optSet.MustPick(opts)
		for i, v := range optValues {
			convertedArgs[1+i], err = convertArg(v, funcType.In(1+i))
			if err != nil {
				throw(errors.New("bad option " + parse.Quote(optSet.optSpecs[i].Name) + ": " + err.Error()))
			}
		}
		// Convert and fill fixed arguments.
		for i, arg := range args[:nFixedArgs] {
			convertedArgs[fixedArgsFrom+i], err = convertArg(arg, funcType.In(fixedArgsFrom+i))
			if err != nil {
				throw(errors.New("bad argument: " + err.Error()))
			}
		}
		if isVariadic {
			for i, arg := range args[nFixedArgs:] {
				convertedArgs[fixedArgsTo+i], err = convertArg(arg, variadicType)
				if err != nil {
					throw(errors.New("bad argument: " + err.Error()))
				}
			}
		} else if hasOptionalIterate {
			var iterate func(func(Value))
			if len(args) == nFixedArgs {
				// No Iterator specified in arguments. Use input.
				// Since convertedArgs was created according to the size of the
				// actual argument list, we now append an empty element to make
				// room for this additional iterator argument.
				convertedArgs = append(convertedArgs, reflect.Value{})
				iterate = ec.IterateInputs
			} else {
				iterator, ok := args[nFixedArgs].(Iterator)
				if !ok {
					throw(errors.New("bad argument: need iterator, got " + args[nFixedArgs].Kind()))
				}
				// Adapt Iterator.Iterate (bool-returning) to the plain
				// iterate signature by always continuing.
				iterate = func(f func(Value)) {
					iterator.Iterate(func(v Value) bool {
						f(v)
						return true
					})
				}
			}
			convertedArgs[fixedArgsTo] = reflect.ValueOf(iterate)
		}
		reflect.ValueOf(inner).Call(convertedArgs)
	}
}
// supportedArgType reports whether WrapFn can convert call-site Values to
// inner-function parameters of type t: string, int, float64, or any type
// implementing Value.
func supportedArgType(t reflect.Type) bool {
	return t.Kind() == reflect.String ||
		t.Kind() == reflect.Int || t.Kind() == reflect.Float64 ||
		t.Implements(valueType)
}

// convertArg converts one Value into a reflect.Value assignable to
// wantType, or returns an error naming the expected type.
func convertArg(arg Value, wantType reflect.Type) (reflect.Value, error) {
	var converted interface{}
	var err error
	switch wantType.Kind() {
	case reflect.String:
		// Distinguish the eval.String type from a plain Go string.
		if wantType == stringValueType {
			converted = String(ToString(arg))
		} else {
			converted = ToString(arg)
		}
	case reflect.Int:
		converted, err = toInt(arg)
	case reflect.Float64:
		converted, err = toFloat(arg)
	default:
		if reflect.TypeOf(arg).ConvertibleTo(wantType) {
			converted = arg
		} else {
			err = fmt.Errorf("need %s", wantType.Name())
		}
	}
	return reflect.ValueOf(converted), err
}
// wrapStringToString adapts a string->string function (e.g. filepath.Base)
// into a builtin taking one string argument and emitting one String.
func wrapStringToString(f func(string) string) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		s := mustGetOneString(args)
		ec.ports[1].Chan <- String(f(s))
	}
}

// wrapStringToStringError is like wrapStringToString for functions that
// can fail (e.g. filepath.Abs); a non-nil error is thrown.
func wrapStringToStringError(f func(string) (string, error)) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		s := mustGetOneString(args)
		result, err := f(s)
		maybeThrow(err)
		ec.ports[1].Chan <- String(result)
	}
}
// wrapStrCompare builds a chained string comparison builtin: the context
// is falsified unless cmp holds for every adjacent pair of arguments.
func wrapStrCompare(cmp func(a, b string) bool) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		if len(args) < 2 {
			throw(ErrArgs)
		}
		// Every argument must be a String.
		for _, a := range args {
			if _, ok := a.(String); !ok {
				throw(ErrArgs)
			}
		}
		for i := 0; i < len(args)-1; i++ {
			if !cmp(string(args[i].(String)), string(args[i+1].(String))) {
				ec.falsify()
				return
			}
		}
	}
}

// wrapNumCompare is the numeric analogue of wrapStrCompare; arguments are
// coerced with toFloat before the chained comparison.
func wrapNumCompare(cmp func(a, b float64) bool) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		if len(args) < 2 {
			throw(ErrArgs)
		}
		floats := make([]float64, len(args))
		for i, a := range args {
			f, err := toFloat(a)
			maybeThrow(err)
			floats[i] = f
		}
		for i := 0; i < len(floats)-1; i++ {
			if !cmp(floats[i], floats[i+1]) {
				ec.falsify()
				return
			}
		}
	}
}
// errMustBeOneString is thrown when a builtin expects exactly one string.
var errMustBeOneString = errors.New("must be one string argument")

// mustGetOneString returns args' single String element as a Go string,
// throwing errMustBeOneString on any other arity or type.
func mustGetOneString(args []Value) string {
	if len(args) != 1 {
		throw(errMustBeOneString)
	}
	s, ok := args[0].(String)
	if !ok {
		throw(errMustBeOneString)
	}
	return string(s)
}
// nop does nothing; it backs the "true" builtin.
func nop(ec *EvalCtx, args []Value, opts map[string]Value) {
}

// falseFn backs the "false" builtin by falsifying the context.
func falseFn(ec *EvalCtx, args []Value, opts map[string]Value) {
	ec.falsify()
}

// put writes each argument to the value output channel.
func put(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	out := ec.ports[1].Chan
	for _, a := range args {
		out <- a
	}
}

// kindOf writes the kind of each argument to the value output channel.
func kindOf(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	out := ec.ports[1].Chan
	for _, a := range args {
		out <- String(a.Kind())
	}
}
// fail throws an error built from the string form of its argument.
func fail(ec *EvalCtx, arg Value) {
	throw(errors.New(ToString(arg)))
}

// multiErrorFn throws a MultiError aggregating the given Error values.
func multiErrorFn(ec *EvalCtx, args ...Error) {
	throw(MultiError{args})
}

// returnFn throws the Return control-flow sentinel.
func returnFn(ec *EvalCtx) {
	throw(Return)
}

// breakFn throws the Break control-flow sentinel.
func breakFn(ec *EvalCtx) {
	throw(Break)
}

// continueFn throws the Continue control-flow sentinel.
func continueFn(ec *EvalCtx) {
	throw(Continue)
}
// print writes the string arguments to the byte output, separated by the
// sep option (no trailing separator or newline).
func print(ec *EvalCtx, sepv String, args ...string) {
	ec.ports[1].File.WriteString(strings.Join(args, string(sepv)))
}

// echo is print followed by a newline.
func echo(ec *EvalCtx, sep String, args ...string) {
	print(ec, sep, args...)
	ec.ports[1].File.WriteString("\n")
}
// pprint writes the Repr of each argument to the byte output, one per line.
func pprint(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	out := ec.ports[1].File
	for _, arg := range args {
		out.WriteString(arg.Repr(0))
		out.WriteString("\n")
	}
}

// slurp reads the entire byte input and emits it as a single String.
func slurp(ec *EvalCtx) {
	in := ec.ports[0].File
	out := ec.ports[1].Chan
	all, err := ioutil.ReadAll(in)
	if err != nil {
		// Debugging aid for a read failure; the error itself is rethrown
		// below via maybeThrow.
		b, err := sys.GetNonblock(0)
		fmt.Println("stdin is nonblock:", b, err)
		fmt.Println("stdin is stdin:", in == os.Stdin)
	}
	maybeThrow(err)
	out <- String(string(all))
}
// fromLines converts the byte input into one output Value per line.
func fromLines(ec *EvalCtx) {
	in := ec.ports[0].File
	out := ec.ports[1].Chan
	linesToChan(in, out)
}

// toLines writes each iterated value's string form to the byte output,
// one per line.
func toLines(ec *EvalCtx, iterate func(func(Value))) {
	out := ec.ports[1].File
	iterate(func(v Value) {
		fmt.Fprintln(out, ToString(v))
	})
}
// unpack puts each element of the argument on the value output channel.
func unpack(ec *EvalCtx, v IteratorValue) {
	out := ec.ports[1].Chan
	v.Iterate(func(e Value) bool {
		out <- e
		// Always continue iterating.
		return true
	})
}
// joins joins all input strings with a delimiter; non-string input is an
// error.
func joins(ec *EvalCtx, sep String, iterate func(func(Value))) {
	var buf bytes.Buffer
	iterate(func(v Value) {
		if s, ok := v.(String); ok {
			// Write the separator before every element except the first.
			if buf.Len() > 0 {
				buf.WriteString(string(sep))
			}
			buf.WriteString(string(s))
		} else {
			throwf("join wants string input, got %s", v.Kind())
		}
	})
	out := ec.ports[1].Chan
	out <- String(buf.String())
}

// splits splits an argument strings by a delimiter and writes all pieces.
func splits(ec *EvalCtx, sep, s String) {
	out := ec.ports[1].Chan
	parts := strings.Split(string(s), string(sep))
	for _, p := range parts {
		out <- String(p)
	}
}
// hasPrefix falsifies the context when s does not start with prefix.
func hasPrefix(ec *EvalCtx, s, prefix String) {
	if strings.HasPrefix(string(s), string(prefix)) {
		return
	}
	ec.falsify()
}

// hasSuffix falsifies the context when s does not end with suffix.
func hasSuffix(ec *EvalCtx, s, suffix String) {
	if strings.HasSuffix(string(s), string(suffix)) {
		return
	}
	ec.falsify()
}
// toJSON converts a stream of Value's to JSON data.
func toJSON(ec *EvalCtx, iterate func(func(Value))) {
	out := ec.ports[1].File
	enc := json.NewEncoder(out)
	iterate(func(v Value) {
		err := enc.Encode(v)
		maybeThrow(err)
	})
}

// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
	in := ec.ports[0].File
	out := ec.ports[1].Chan
	dec := json.NewDecoder(in)
	var v interface{}
	for {
		err := dec.Decode(&v)
		if err != nil {
			// EOF is the normal end of input, not an error.
			if err == io.EOF {
				return
			}
			throw(err)
		}
		out <- FromJSONInterface(v)
	}
}
// each takes a single closure and applies it to all input values.
// A Break thrown by the closure stops further calls, Continue is ignored,
// and any other exception is rethrown.
func each(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	broken := false
	iterate(func(v Value) {
		if broken {
			return
		}
		// NOTE We don't have the position range of the closure in the source.
		// Ideally, it should be kept in the Closure itself.
		newec := ec.fork("closure of each")
		newec.ports[0] = DevNullClosedChan
		ex := newec.PCall(f, []Value{v}, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop
		case Break:
			broken = true
		default:
			throw(ex)
		}
	})
}
// peach takes a single closure and applies it to all input values in
// parallel. A Break stops spawning further calls (already-running ones
// finish); the first non-control exception is rethrown after all
// goroutines complete.
//
// FIX: broken and err were previously read in the iterate callback and
// written from the worker goroutines with no synchronization — a data
// race flagged by the race detector. All access now goes through mu.
func peach(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	var w sync.WaitGroup
	var mu sync.Mutex
	broken := false
	var err error
	iterate(func(v Value) {
		mu.Lock()
		stopped := broken || err != nil
		mu.Unlock()
		if stopped {
			return
		}
		w.Add(1)
		go func() {
			defer w.Done()
			// NOTE We don't have the position range of the closure in the source.
			// Ideally, it should be kept in the Closure itself.
			newec := ec.fork("closure of each")
			newec.ports[0] = DevNullClosedChan
			ex := newec.PCall(f, []Value{v}, NoOpts)
			ClosePorts(newec.ports)
			mu.Lock()
			switch ex {
			case nil, Continue:
				// nop
			case Break:
				broken = true
			default:
				err = ex
			}
			mu.Unlock()
		}()
	})
	w.Wait()
	maybeThrow(err)
}
// eawkWordSep splits lines into fields on runs of spaces and tabs.
var eawkWordSep = regexp.MustCompile("[ \t]+")

// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	broken := false
	iterate(func(v Value) {
		if broken {
			return
		}
		line, ok := v.(String)
		if !ok {
			throw(ErrInput)
		}
		// args[0] is the whole line; args[1:] are its fields.
		args := []Value{line}
		for _, field := range eawkWordSep.Split(strings.Trim(string(line), " \t"), -1) {
			args = append(args, String(field))
		}
		newec := ec.fork("fn of eawk")
		// TODO: Close port 0 of newec.
		ex := newec.PCall(f, args, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop
		case Break:
			broken = true
		default:
			throw(ex)
		}
	})
}
// constantly outputs a new builtin that, when called with no arguments,
// emits the values captured here.
func constantly(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	out := ec.ports[1].Chan
	// XXX Repr of this fn is not right
	out <- &BuiltinFn{
		"created by constantly",
		func(ec *EvalCtx, a []Value, o map[string]Value) {
			TakeNoOpt(o)
			if len(a) != 0 {
				throw(ErrArgs)
			}
			out := ec.ports[1].Chan
			for _, v := range args {
				out <- v
			}
		},
	}
}
// cd changes the working directory. With no argument it goes to the home
// directory; with one argument it goes to that directory; more arguments
// are an error.
func cd(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	switch len(args) {
	case 0:
		cdInner(mustGetHome(""), ec)
	case 1:
		cdInner(ToString(args[0]), ec)
	default:
		throw(ErrArgs)
	}
}
// cdInner performs the actual chdir and, when a store is connected,
// asynchronously records the new working directory in the directory
// history.
func cdInner(dir string, ec *EvalCtx) {
	err := os.Chdir(dir)
	if err != nil {
		throw(err)
	}
	if ec.Store != nil {
		// XXX Error ignored.
		pwd, err := os.Getwd()
		if err == nil {
			store := ec.Store
			// Register with the wait group before starting the
			// goroutine. The original called Add inside the goroutine,
			// so a concurrent Waits.Wait could return before the add
			// was observed, losing the pending AddDir.
			store.Waits.Add(1)
			go func() {
				// XXX Error ignored.
				store.AddDir(pwd, 1)
				store.Waits.Done()
				Logger.Println("added dir to store:", pwd)
			}()
		}
	}
}
// Field names of the structs output by dirs.
var dirFieldNames = []string{"path", "score"}

// dirs lists the directory history from the store, writing one struct per
// directory with "path" and "score" fields (score formatted as a string).
// Throws if no store is connected or the store query fails.
func dirs(ec *EvalCtx) {
	if ec.Store == nil {
		throw(ErrStoreNotConnected)
	}
	dirs, err := ec.Store.ListDirs()
	if err != nil {
		throw(errors.New("store error: " + err.Error()))
	}
	out := ec.ports[1].Chan
	for _, dir := range dirs {
		out <- &Struct{dirFieldNames, []Variable{
			NewRoVariable(String(dir.Path)),
			NewRoVariable(String(fmt.Sprint(dir.Score))),
		}}
	}
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
// toFloat converts a Value to a float64. The value must be a String; it is
// parsed first as a float literal and, on failure, re-parsed as an integer
// literal with base 0 (so 0x-hex and 0-octal forms work) and converted.
// On total failure the float-parse error is returned.
func toFloat(arg Value) (float64, error) {
	if _, ok := arg.(String); !ok {
		return 0, fmt.Errorf("must be string")
	}
	s := string(arg.(String))
	num, err := strconv.ParseFloat(s, 64)
	if err != nil {
		// Not a float literal; fall back to integer syntax.
		num, err2 := strconv.ParseInt(s, 0, 64)
		if err2 != nil {
			return 0, err
		}
		return float64(num), nil
	}
	return num, nil
}
// toInt converts a Value to an int. The value must be a String holding an
// integer literal accepted by strconv.ParseInt with base 0 (decimal,
// 0x-hex, or 0-octal).
func toInt(arg Value) (int, error) {
	// Single type assertion; the original shadowed arg and then
	// redundantly re-asserted arg.(String) a second time.
	s, ok := arg.(String)
	if !ok {
		return 0, fmt.Errorf("must be string")
	}
	num, err := strconv.ParseInt(string(s), 0, 0)
	if err != nil {
		return 0, err
	}
	return int(num), nil
}
// toRune converts a Value to a rune. The value must be a String containing
// exactly one valid UTF-8 encoded rune; -1 is returned alongside any error.
func toRune(arg Value) (rune, error) {
	str, ok := arg.(String)
	if !ok {
		return -1, fmt.Errorf("must be string")
	}
	text := string(str)
	r, width := utf8.DecodeRuneInString(text)
	switch {
	case r == utf8.RuneError:
		return -1, fmt.Errorf("string is not valid UTF-8")
	case width != len(text):
		return -1, fmt.Errorf("string has multiple runes")
	}
	return r, nil
}
// plus outputs the sum of its arguments (0 with no arguments), formatted
// with %g.
func plus(ec *EvalCtx, nums ...float64) {
	var total float64
	for _, n := range nums {
		total += n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", total))
}
// minus subtracts each of nums from sum and outputs the result. With no
// further arguments it acts as unary negation of sum.
func minus(ec *EvalCtx, sum float64, nums ...float64) {
	result := sum
	if len(nums) == 0 {
		// Unary -
		result = -result
	} else {
		for _, n := range nums {
			result -= n
		}
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", result))
}
// times outputs the product of its arguments (1 with no arguments),
// formatted with %g.
func times(ec *EvalCtx, nums ...float64) {
	product := 1.0
	for _, n := range nums {
		product *= n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", product))
}
func slash(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) == 0 {
// cd /
cdInner("/", ec)
return
}
// Division
wrappedDivide(ec, args, opts)
}
var wrappedDivide = WrapFn(divide)
func divide(ec *EvalCtx, prod float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
prod /= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
func mod(ec *EvalCtx, a, b int) {
out := ec.ports[1].Chan
out <- String(strconv.Itoa(a % b))
}
func randFn(ec *EvalCtx) {
out := ec.ports[1].Chan
out <- String(fmt.Sprint(rand.Float64()))
}
func randint(ec *EvalCtx, low, high int) {
if low >= high {
throw(ErrArgs)
}
out := ec.ports[1].Chan
i := low + rand.Intn(high-low)
out <- String(strconv.Itoa(i))
}
func ord(ec *EvalCtx, s string) {
out := ec.ports[1].Chan
for _, r := range s {
out <- String(fmt.Sprintf("0x%x", r))
}
}
var ErrBadBase = errors.New("bad base")
func base(ec *EvalCtx, b int, nums ...int) {
if b < 2 || b > 36 {
throw(ErrBadBase)
}
out := ec.ports[1].Chan
for _, num := range nums {
out <- String(strconv.FormatInt(int64(num), b))
}
}
// rangeFn outputs a sequence of integers as strings. One argument gives an
// exclusive upper bound counting from 0; two give lower and upper bounds;
// an optional third argument gives the step, which must be positive.
func rangeFn(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var lower, upper int
	step := 1
	var err error
	switch len(args) {
	case 1:
		upper, err = toInt(args[0])
		maybeThrow(err)
	case 2, 3:
		lower, err = toInt(args[0])
		maybeThrow(err)
		upper, err = toInt(args[1])
		maybeThrow(err)
		if len(args) == 3 {
			step, err = toInt(args[2])
			maybeThrow(err)
		}
	default:
		throw(ErrArgs)
	}
	// A zero or negative step would make the loop below never terminate
	// (e.g. `range 0 10 0`); the original looped forever here.
	if step <= 0 {
		throw(ErrArgs)
	}
	out := ec.ports[1].Chan
	for i := lower; i < upper; i += step {
		out <- String(strconv.Itoa(i))
	}
}
func boolFn(ec *EvalCtx, v Value) {
out := ec.ports[1].Chan
out <- Bool(ToBool(v))
}
func is(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) < 2 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if args[i] != args[i+1] {
ec.falsify()
return
}
}
}
func eq(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) < 2 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if !DeepEq(args[i], args[i+1]) {
ec.falsify()
return
}
}
}
func resolveFn(ec *EvalCtx, cmd String) {
out := ec.ports[1].Chan
out <- resolve(string(cmd), ec)
}
func take(ec *EvalCtx, n int, iterate func(func(Value))) {
out := ec.ports[1].Chan
i := 0
iterate(func(v Value) {
if i < n {
out <- v
}
i++
})
}
// count implements the count builtin. With no arguments it counts the
// values on the input stream; with one argument it outputs that value's
// length via Lener, or failing that the number of elements it yields via
// Iterator. The count is written to the output channel as a String.
func count(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var n int
	switch len(args) {
	case 0:
		// Count inputs.
		ec.IterateInputs(func(Value) {
			n++
		})
	case 1:
		// Get length of argument.
		v := args[0]
		if lener, ok := v.(Lener); ok {
			// Cheap path: the value knows its own length.
			n = lener.Len()
		} else if iterator, ok := v.(Iterator); ok {
			// Fall back to counting elements one by one.
			iterator.Iterate(func(Value) bool {
				n++
				return true
			})
		} else {
			throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
		}
	default:
		throw(errors.New("want 0 or 1 argument"))
	}
	ec.ports[1].Chan <- String(strconv.Itoa(n))
}
func overrideWcwidth(ec *EvalCtx, s String, w int) {
r, err := toRune(s)
maybeThrow(err)
util.OverrideWcwidth(r, w)
}
func wcswidth(ec *EvalCtx, s String) {
out := ec.ports[1].Chan
out <- String(strconv.Itoa(util.Wcswidth(string(s))))
}
// fg puts the given processes in the foreground. All pids must belong to
// the same process group; that group is handed terminal control, each
// process is sent SIGCONT, and fg waits for each to stop or exit,
// finally throwing a composite error of the per-process outcomes.
func fg(ec *EvalCtx, pids ...int) {
	if len(pids) == 0 {
		throw(ErrArgs)
	}
	// Verify all pids share one process group.
	var thepgid int
	for i, pid := range pids {
		pgid, err := syscall.Getpgid(pid)
		maybeThrow(err)
		if i == 0 {
			thepgid = pgid
		} else if pgid != thepgid {
			throw(ErrNotInSameGroup)
		}
	}
	// Give the group control of the terminal (fd 0).
	err := sys.Tcsetpgrp(0, thepgid)
	maybeThrow(err)
	// NOTE(review): this local shadows the errors package for the rest
	// of the function; harmless here, but easy to trip over.
	errors := make([]Error, len(pids))
	for i, pid := range pids {
		err := syscall.Kill(pid, syscall.SIGCONT)
		if err != nil {
			errors[i] = Error{err}
		}
	}
	for i, pid := range pids {
		if errors[i] != OK {
			// SIGCONT already failed for this pid; do not wait on it.
			continue
		}
		var ws syscall.WaitStatus
		// WUNTRACED: also return when the child merely stops.
		_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
		if err != nil {
			errors[i] = Error{err}
		} else {
			// TODO find command name
			errors[i] = Error{NewExternalCmdExit(fmt.Sprintf("(pid %d)", pid), ws, pid)}
		}
	}
	throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func fopen(ec *EvalCtx, name string) {
// TODO support opening files for writing etc as well.
out := ec.ports[1].Chan
f, err := os.Open(name)
maybeThrow(err)
out <- File{f}
}
func pipe(ec *EvalCtx) {
r, w, err := os.Pipe()
out := ec.ports[1].Chan
maybeThrow(err)
out <- Pipe{r, w}
}
func fclose(ec *EvalCtx, f File) { maybeThrow(f.inner.Close()) }
func prclose(ec *EvalCtx, p Pipe) { maybeThrow(p.r.Close()) }
func pwclose(ec *EvalCtx, p Pipe) { maybeThrow(p.w.Close()) }
func sleep(ec *EvalCtx, t float64) {
d := time.Duration(float64(time.Second) * t)
select {
case <-ec.Interrupts():
throw(ErrInterrupted)
case <-time.After(d):
}
}
func _stack(ec *EvalCtx) {
out := ec.ports[1].File
// XXX dup with main.go
buf := make([]byte, 1024)
for runtime.Stack(buf, true) == cap(buf) {
buf = make([]byte, cap(buf)*2)
}
out.Write(buf)
}
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
func _time(ec *EvalCtx, f FnValue) {
t0 := time.Now()
f.Call(ec, NoArgs, NoOpts)
t1 := time.Now()
dt := t1.Sub(t0)
fmt.Fprintln(ec.ports[1].File, dt)
}
func exec(ec *EvalCtx, args ...string) {
if len(args) == 0 {
args = []string{"elvish"}
}
var err error
args[0], err = ec.Search(args[0])
maybeThrow(err)
preExit(ec)
err = syscall.Exec(args[0], args, os.Environ())
maybeThrow(err)
}
func exit(ec *EvalCtx, args ...int) {
doexit := func(i int) {
preExit(ec)
os.Exit(i)
}
switch len(args) {
case 0:
doexit(0)
case 1:
doexit(args[0])
default:
throw(ErrArgs)
}
}
func preExit(ec *EvalCtx) {
err := ec.Store.Close()
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
}
// Add an experimental -ifaddrs builtin.
package eval
// Builtin functions.
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unicode/utf8"
"github.com/elves/elvish/parse"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value, map[string]Value)
}
var _ FnValue = &BuiltinFn{}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "<builtin " + b.Name + ">"
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value, opts map[string]Value) {
b.Impl(ec, args, opts)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{
// Fundamental predicates
&BuiltinFn{"true", nop},
&BuiltinFn{"false", falseFn},
// Introspection
&BuiltinFn{"kind-of", kindOf},
// Value output
&BuiltinFn{"put", put},
&BuiltinFn{"unpack", WrapFn(unpack)},
// Bytes output
&BuiltinFn{"print", WrapFn(print, OptSpec{"sep", String(" ")})},
&BuiltinFn{"echo", WrapFn(echo, OptSpec{"sep", String(" ")})},
&BuiltinFn{"pprint", pprint},
// Bytes to value
&BuiltinFn{"slurp", WrapFn(slurp)},
&BuiltinFn{"from-lines", WrapFn(fromLines)},
&BuiltinFn{"from-json", WrapFn(fromJSON)},
// Value to bytes
&BuiltinFn{"to-lines", WrapFn(toLines)},
&BuiltinFn{"to-json", WrapFn(toJSON)},
// String
&BuiltinFn{"joins", WrapFn(joins)},
&BuiltinFn{"splits", WrapFn(splits, OptSpec{"sep", String("")})},
&BuiltinFn{"has-prefix", WrapFn(hasPrefix)},
&BuiltinFn{"has-suffix", WrapFn(hasSuffix)},
// String comparison
&BuiltinFn{"<s",
wrapStrCompare(func(a, b string) bool { return a < b })},
&BuiltinFn{"<=s",
wrapStrCompare(func(a, b string) bool { return a <= b })},
&BuiltinFn{"==s",
wrapStrCompare(func(a, b string) bool { return a == b })},
&BuiltinFn{"!=s",
wrapStrCompare(func(a, b string) bool { return a != b })},
&BuiltinFn{">s",
wrapStrCompare(func(a, b string) bool { return a > b })},
&BuiltinFn{">=s",
wrapStrCompare(func(a, b string) bool { return a >= b })},
// Exception and control
&BuiltinFn{"fail", WrapFn(fail)},
&BuiltinFn{"multi-error", WrapFn(multiErrorFn)},
&BuiltinFn{"return", WrapFn(returnFn)},
&BuiltinFn{"break", WrapFn(breakFn)},
&BuiltinFn{"continue", WrapFn(continueFn)},
// Functional primitives
&BuiltinFn{"constantly", constantly},
&BuiltinFn{"each", WrapFn(each)},
&BuiltinFn{"peach", WrapFn(peach)},
// Sequence primitives
&BuiltinFn{"take", WrapFn(take)},
&BuiltinFn{"range", rangeFn},
&BuiltinFn{"count", count},
// eawk
&BuiltinFn{"eawk", WrapFn(eawk)},
// Directory
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", WrapFn(dirs)},
// Path
&BuiltinFn{"path-abs", wrapStringToStringError(filepath.Abs)},
&BuiltinFn{"path-base", wrapStringToString(filepath.Base)},
&BuiltinFn{"path-clean", wrapStringToString(filepath.Clean)},
&BuiltinFn{"path-dir", wrapStringToString(filepath.Dir)},
&BuiltinFn{"path-ext", wrapStringToString(filepath.Ext)},
&BuiltinFn{"eval-symlinks", wrapStringToStringError(filepath.EvalSymlinks)},
&BuiltinFn{"tilde-abbr", WrapFn(tildeAbbr)},
&BuiltinFn{"source", WrapFn(source)},
// Arithmetics
&BuiltinFn{"+", WrapFn(plus)},
&BuiltinFn{"-", WrapFn(minus)},
&BuiltinFn{"*", WrapFn(times)},
&BuiltinFn{"/", slash},
&BuiltinFn{"^", WrapFn(pow)},
&BuiltinFn{"%", WrapFn(mod)},
// Random
&BuiltinFn{"rand", WrapFn(randFn)},
&BuiltinFn{"randint", WrapFn(randint)},
// Numerical comparison
&BuiltinFn{"<",
wrapNumCompare(func(a, b float64) bool { return a < b })},
&BuiltinFn{"<=",
wrapNumCompare(func(a, b float64) bool { return a <= b })},
&BuiltinFn{"==",
wrapNumCompare(func(a, b float64) bool { return a == b })},
&BuiltinFn{"!=",
wrapNumCompare(func(a, b float64) bool { return a != b })},
&BuiltinFn{">",
wrapNumCompare(func(a, b float64) bool { return a > b })},
&BuiltinFn{">=",
wrapNumCompare(func(a, b float64) bool { return a >= b })},
// Generic identity and equality
&BuiltinFn{"is", is},
&BuiltinFn{"eq", eq},
// String operations
&BuiltinFn{"ord", WrapFn(ord)},
&BuiltinFn{"base", WrapFn(base)},
&BuiltinFn{"-override-wcwidth", WrapFn(overrideWcwidth)},
&BuiltinFn{"wcswidth", WrapFn(wcswidth)},
&BuiltinFn{"resolve", WrapFn(resolveFn)},
// bool
&BuiltinFn{"bool", WrapFn(boolFn)},
// File and pipe
&BuiltinFn{"fopen", WrapFn(fopen)},
&BuiltinFn{"fclose", WrapFn(fclose)},
&BuiltinFn{"pipe", WrapFn(pipe)},
&BuiltinFn{"prclose", WrapFn(prclose)},
&BuiltinFn{"pwclose", WrapFn(pwclose)},
&BuiltinFn{"esleep", WrapFn(sleep)},
&BuiltinFn{"fg", WrapFn(fg)},
&BuiltinFn{"exec", WrapFn(exec)},
&BuiltinFn{"exit", WrapFn(exit)},
&BuiltinFn{"-stack", WrapFn(_stack)},
&BuiltinFn{"-log", WrapFn(_log)},
&BuiltinFn{"-time", WrapFn(_time)},
&BuiltinFn{"-ifaddrs", WrapFn(_ifaddrs)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
// For rand and randint.
rand.Seed(time.Now().UTC().UnixNano())
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
iterateType = reflect.TypeOf((func(func(Value)))(nil))
stringValueType = reflect.TypeOf(String(""))
)
// WrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature of
// the inner function and option specifications. The inner function must accept
// EvalCtx* as the first argument, followed by options, followed by arguments.
func WrapFn(inner interface{}, optSpecs ...OptSpec) func(*EvalCtx, []Value, map[string]Value) {
	funcType := reflect.TypeOf(inner)
	if funcType.In(0) != evalCtxType {
		panic("bad func to wrap, first argument not *EvalCtx")
	}
	nopts := len(optSpecs)
	// Parameters of inner: [0] = *EvalCtx, [1..optsTo) = options,
	// [optsTo..) = arguments.
	optsTo := nopts + 1
	optSet := NewOptSet(optSpecs...)
	// Range occupied by fixed arguments in the argument list to inner.
	fixedArgsFrom, fixedArgsTo := optsTo, funcType.NumIn()
	isVariadic := funcType.IsVariadic()
	hasOptionalIterate := false
	var variadicType reflect.Type
	if isVariadic {
		// The trailing ...T parameter is not a fixed argument.
		fixedArgsTo--
		variadicType = funcType.In(funcType.NumIn() - 1).Elem()
		if !supportedArgType(variadicType) {
			panic(fmt.Sprintf("bad func to wrap, variadic argument type %s unsupported", variadicType))
		}
	} else if funcType.In(funcType.NumIn()-1) == iterateType {
		// A trailing func(func(Value)) parameter means the builtin can
		// take either an explicit iterable argument or, when omitted,
		// iterate over its input stream.
		fixedArgsTo--
		hasOptionalIterate = true
	}
	for i := 1; i < fixedArgsTo; i++ {
		if !supportedArgType(funcType.In(i)) {
			panic(fmt.Sprintf("bad func to wrap, argument type %s unsupported", funcType.In(i)))
		}
	}
	nFixedArgs := fixedArgsTo - fixedArgsFrom
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		// Check arity of arguments.
		if isVariadic {
			if len(args) < nFixedArgs {
				throw(fmt.Errorf("arity mismatch: want %d or more arguments, got %d", nFixedArgs, len(args)))
			}
		} else if hasOptionalIterate {
			if len(args) < nFixedArgs || len(args) > nFixedArgs+1 {
				throw(fmt.Errorf("arity mismatch: want %d or %d arguments, got %d", nFixedArgs, nFixedArgs+1, len(args)))
			}
		} else if len(args) != nFixedArgs {
			throw(fmt.Errorf("arity mismatch: want %d arguments, got %d", nFixedArgs, len(args)))
		}
		convertedArgs := make([]reflect.Value, 1+nopts+len(args))
		convertedArgs[0] = reflect.ValueOf(ec)
		// Convert and fill options.
		var err error
		optValues := optSet.MustPick(opts)
		for i, v := range optValues {
			convertedArgs[1+i], err = convertArg(v, funcType.In(1+i))
			if err != nil {
				throw(errors.New("bad option " + parse.Quote(optSet.optSpecs[i].Name) + ": " + err.Error()))
			}
		}
		// Convert and fill fixed arguments.
		for i, arg := range args[:nFixedArgs] {
			convertedArgs[fixedArgsFrom+i], err = convertArg(arg, funcType.In(fixedArgsFrom+i))
			if err != nil {
				throw(errors.New("bad argument: " + err.Error()))
			}
		}
		if isVariadic {
			for i, arg := range args[nFixedArgs:] {
				convertedArgs[fixedArgsTo+i], err = convertArg(arg, variadicType)
				if err != nil {
					throw(errors.New("bad argument: " + err.Error()))
				}
			}
		} else if hasOptionalIterate {
			var iterate func(func(Value))
			if len(args) == nFixedArgs {
				// No Iterator specified in arguments. Use input.
				// Since convertedArgs was created according to the size of the
				// actual argument list, we now append an empty element to make
				// room for this additional iterator argument.
				convertedArgs = append(convertedArgs, reflect.Value{})
				iterate = ec.IterateInputs
			} else {
				iterator, ok := args[nFixedArgs].(Iterator)
				if !ok {
					throw(errors.New("bad argument: need iterator, got " + args[nFixedArgs].Kind()))
				}
				// Adapt the Iterator (which can stop early) to the
				// always-run-to-completion iterate signature.
				iterate = func(f func(Value)) {
					iterator.Iterate(func(v Value) bool {
						f(v)
						return true
					})
				}
			}
			convertedArgs[fixedArgsTo] = reflect.ValueOf(iterate)
		}
		reflect.ValueOf(inner).Call(convertedArgs)
	}
}
func supportedArgType(t reflect.Type) bool {
return t.Kind() == reflect.String ||
t.Kind() == reflect.Int || t.Kind() == reflect.Float64 ||
t.Implements(valueType)
}
func convertArg(arg Value, wantType reflect.Type) (reflect.Value, error) {
var converted interface{}
var err error
switch wantType.Kind() {
case reflect.String:
if wantType == stringValueType {
converted = String(ToString(arg))
} else {
converted = ToString(arg)
}
case reflect.Int:
converted, err = toInt(arg)
case reflect.Float64:
converted, err = toFloat(arg)
default:
if reflect.TypeOf(arg).ConvertibleTo(wantType) {
converted = arg
} else {
err = fmt.Errorf("need %s", wantType.Name())
}
}
return reflect.ValueOf(converted), err
}
func wrapStringToString(f func(string) string) func(*EvalCtx, []Value, map[string]Value) {
return func(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
s := mustGetOneString(args)
ec.ports[1].Chan <- String(f(s))
}
}
func wrapStringToStringError(f func(string) (string, error)) func(*EvalCtx, []Value, map[string]Value) {
return func(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
s := mustGetOneString(args)
result, err := f(s)
maybeThrow(err)
ec.ports[1].Chan <- String(result)
}
}
func wrapStrCompare(cmp func(a, b string) bool) func(*EvalCtx, []Value, map[string]Value) {
return func(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) < 2 {
throw(ErrArgs)
}
for _, a := range args {
if _, ok := a.(String); !ok {
throw(ErrArgs)
}
}
for i := 0; i < len(args)-1; i++ {
if !cmp(string(args[i].(String)), string(args[i+1].(String))) {
ec.falsify()
return
}
}
}
}
func wrapNumCompare(cmp func(a, b float64) bool) func(*EvalCtx, []Value, map[string]Value) {
return func(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) < 2 {
throw(ErrArgs)
}
floats := make([]float64, len(args))
for i, a := range args {
f, err := toFloat(a)
maybeThrow(err)
floats[i] = f
}
for i := 0; i < len(floats)-1; i++ {
if !cmp(floats[i], floats[i+1]) {
ec.falsify()
return
}
}
}
}
var errMustBeOneString = errors.New("must be one string argument")
func mustGetOneString(args []Value) string {
if len(args) != 1 {
throw(errMustBeOneString)
}
s, ok := args[0].(String)
if !ok {
throw(errMustBeOneString)
}
return string(s)
}
func nop(ec *EvalCtx, args []Value, opts map[string]Value) {
}
func falseFn(ec *EvalCtx, args []Value, opts map[string]Value) {
ec.falsify()
}
func put(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
out := ec.ports[1].Chan
for _, a := range args {
out <- a
}
}
func kindOf(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
out := ec.ports[1].Chan
for _, a := range args {
out <- String(a.Kind())
}
}
func fail(ec *EvalCtx, arg Value) {
throw(errors.New(ToString(arg)))
}
func multiErrorFn(ec *EvalCtx, args ...Error) {
throw(MultiError{args})
}
func returnFn(ec *EvalCtx) {
throw(Return)
}
func breakFn(ec *EvalCtx) {
throw(Break)
}
func continueFn(ec *EvalCtx) {
throw(Continue)
}
func print(ec *EvalCtx, sepv String, args ...string) {
out := ec.ports[1].File
sep := string(sepv)
for i, arg := range args {
if i > 0 {
out.WriteString(sep)
}
out.WriteString(arg)
}
}
func echo(ec *EvalCtx, sep String, args ...string) {
print(ec, sep, args...)
ec.ports[1].File.WriteString("\n")
}
func pprint(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
out := ec.ports[1].File
for _, arg := range args {
out.WriteString(arg.Repr(0))
out.WriteString("\n")
}
}
func slurp(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
all, err := ioutil.ReadAll(in)
if err != nil {
b, err := sys.GetNonblock(0)
fmt.Println("stdin is nonblock:", b, err)
fmt.Println("stdin is stdin:", in == os.Stdin)
}
maybeThrow(err)
out <- String(string(all))
}
func fromLines(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
linesToChan(in, out)
}
func toLines(ec *EvalCtx, iterate func(func(Value))) {
out := ec.ports[1].File
iterate(func(v Value) {
fmt.Fprintln(out, ToString(v))
})
}
// unpack puts each element of the argument.
func unpack(ec *EvalCtx, v IteratorValue) {
out := ec.ports[1].Chan
v.Iterate(func(e Value) bool {
out <- e
return true
})
}
// joins joins all input strings with a delimiter.
func joins(ec *EvalCtx, sep String, iterate func(func(Value))) {
var buf bytes.Buffer
iterate(func(v Value) {
if s, ok := v.(String); ok {
if buf.Len() > 0 {
buf.WriteString(string(sep))
}
buf.WriteString(string(s))
} else {
throwf("join wants string input, got %s", v.Kind())
}
})
out := ec.ports[1].Chan
out <- String(buf.String())
}
// splits splits an argument strings by a delimiter and writes all pieces.
func splits(ec *EvalCtx, sep, s String) {
out := ec.ports[1].Chan
parts := strings.Split(string(s), string(sep))
for _, p := range parts {
out <- String(p)
}
}
func hasPrefix(ec *EvalCtx, s, prefix String) {
if !strings.HasPrefix(string(s), string(prefix)) {
ec.falsify()
}
}
func hasSuffix(ec *EvalCtx, s, suffix String) {
if !strings.HasSuffix(string(s), string(suffix)) {
ec.falsify()
}
}
// toJSON converts a stream of Value's to JSON data.
func toJSON(ec *EvalCtx, iterate func(func(Value))) {
out := ec.ports[1].File
enc := json.NewEncoder(out)
iterate(func(v Value) {
err := enc.Encode(v)
maybeThrow(err)
})
}
// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
dec := json.NewDecoder(in)
var v interface{}
for {
err := dec.Decode(&v)
if err != nil {
if err == io.EOF {
return
}
throw(err)
}
out <- FromJSONInterface(v)
}
}
// each takes a single closure and applies it to all input values.
func each(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
broken := false
iterate(func(v Value) {
if broken {
return
}
// NOTE We don't have the position range of the closure in the source.
// Ideally, it should be kept in the Closure itself.
newec := ec.fork("closure of each")
newec.ports[0] = DevNullClosedChan
ex := newec.PCall(f, []Value{v}, NoOpts)
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
broken = true
default:
throw(ex)
}
})
}
// peach takes a single closure and applies it to all input values in
// parallel. Like each, a callee may call break (stops scheduling new
// work) or continue; the first non-control exception is rethrown after
// all started calls have finished.
func peach(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	var w sync.WaitGroup
	// mu guards broken and err, which are written by the worker
	// goroutines below and read in the iteration callback. The original
	// accessed them without synchronization, which is a data race.
	var mu sync.Mutex
	broken := false
	var err error
	iterate(func(v Value) {
		mu.Lock()
		stop := broken || err != nil
		mu.Unlock()
		if stop {
			return
		}
		w.Add(1)
		go func() {
			// Done is deferred so the WaitGroup is released even if
			// something below panics.
			defer w.Done()
			// NOTE We don't have the position range of the closure in the source.
			// Ideally, it should be kept in the Closure itself.
			newec := ec.fork("closure of each")
			newec.ports[0] = DevNullClosedChan
			ex := newec.PCall(f, []Value{v}, NoOpts)
			ClosePorts(newec.ports)
			mu.Lock()
			switch ex {
			case nil, Continue:
				// nop
			case Break:
				broken = true
			default:
				err = ex
			}
			mu.Unlock()
		}()
	})
	w.Wait()
	maybeThrow(err)
}
var eawkWordSep = regexp.MustCompile("[ \t]+")
// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	broken := false
	iterate(func(v Value) {
		if broken {
			return
		}
		line, ok := v.(String)
		if !ok {
			throw(ErrInput)
		}
		// First argument is the whole line; the rest are its fields.
		args := []Value{line}
		for _, field := range eawkWordSep.Split(strings.Trim(string(line), " \t"), -1) {
			args = append(args, String(field))
		}
		newec := ec.fork("fn of eawk")
		// Close port 0 of newec, consistent with each and peach: the
		// callee should not read eawk's own input stream.
		newec.ports[0] = DevNullClosedChan
		ex := newec.PCall(f, args, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop
		case Break:
			broken = true
		default:
			throw(ex)
		}
	})
}
func constantly(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
out := ec.ports[1].Chan
// XXX Repr of this fn is not right
out <- &BuiltinFn{
"created by constantly",
func(ec *EvalCtx, a []Value, o map[string]Value) {
TakeNoOpt(o)
if len(a) != 0 {
throw(ErrArgs)
}
out := ec.ports[1].Chan
for _, v := range args {
out <- v
}
},
}
}
func cd(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
var dir string
if len(args) == 0 {
dir = mustGetHome("")
} else if len(args) == 1 {
dir = ToString(args[0])
} else {
throw(ErrArgs)
}
cdInner(dir, ec)
}
// cdInner performs the actual chdir and, when a store is connected,
// asynchronously records the new working directory in the directory
// history.
func cdInner(dir string, ec *EvalCtx) {
	err := os.Chdir(dir)
	if err != nil {
		throw(err)
	}
	if ec.Store != nil {
		// XXX Error ignored.
		pwd, err := os.Getwd()
		if err == nil {
			store := ec.Store
			// Register with the wait group before starting the
			// goroutine. The original called Add inside the goroutine,
			// so a concurrent Waits.Wait could return before the add
			// was observed, losing the pending AddDir.
			store.Waits.Add(1)
			go func() {
				// XXX Error ignored.
				store.AddDir(pwd, 1)
				store.Waits.Done()
				Logger.Println("added dir to store:", pwd)
			}()
		}
	}
}
var dirFieldNames = []string{"path", "score"}
func dirs(ec *EvalCtx) {
if ec.Store == nil {
throw(ErrStoreNotConnected)
}
dirs, err := ec.Store.ListDirs()
if err != nil {
throw(errors.New("store error: " + err.Error()))
}
out := ec.ports[1].Chan
for _, dir := range dirs {
out <- &Struct{dirFieldNames, []Variable{
NewRoVariable(String(dir.Path)),
NewRoVariable(String(fmt.Sprint(dir.Score))),
}}
}
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
func toFloat(arg Value) (float64, error) {
if _, ok := arg.(String); !ok {
return 0, fmt.Errorf("must be string")
}
s := string(arg.(String))
num, err := strconv.ParseFloat(s, 64)
if err != nil {
num, err2 := strconv.ParseInt(s, 0, 64)
if err2 != nil {
return 0, err
}
return float64(num), nil
}
return num, nil
}
// toInt converts a Value to an int. The value must be a String holding an
// integer literal accepted by strconv.ParseInt with base 0 (decimal,
// 0x-hex, or 0-octal).
func toInt(arg Value) (int, error) {
	// Single type assertion; the original shadowed arg and then
	// redundantly re-asserted arg.(String) a second time.
	s, ok := arg.(String)
	if !ok {
		return 0, fmt.Errorf("must be string")
	}
	num, err := strconv.ParseInt(string(s), 0, 0)
	if err != nil {
		return 0, err
	}
	return int(num), nil
}
func toRune(arg Value) (rune, error) {
ss, ok := arg.(String)
if !ok {
return -1, fmt.Errorf("must be string")
}
s := string(ss)
r, size := utf8.DecodeRuneInString(s)
if r == utf8.RuneError {
return -1, fmt.Errorf("string is not valid UTF-8")
}
if size != len(s) {
return -1, fmt.Errorf("string has multiple runes")
}
return r, nil
}
func plus(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
sum := 0.0
for _, f := range nums {
sum += f
}
out <- String(fmt.Sprintf("%g", sum))
}
func minus(ec *EvalCtx, sum float64, nums ...float64) {
out := ec.ports[1].Chan
if len(nums) == 0 {
// Unary -
sum = -sum
} else {
for _, f := range nums {
sum -= f
}
}
out <- String(fmt.Sprintf("%g", sum))
}
func times(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
prod := 1.0
for _, f := range nums {
prod *= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func slash(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) == 0 {
// cd /
cdInner("/", ec)
return
}
// Division
wrappedDivide(ec, args, opts)
}
var wrappedDivide = WrapFn(divide)
func divide(ec *EvalCtx, prod float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
prod /= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
func mod(ec *EvalCtx, a, b int) {
out := ec.ports[1].Chan
out <- String(strconv.Itoa(a % b))
}
func randFn(ec *EvalCtx) {
out := ec.ports[1].Chan
out <- String(fmt.Sprint(rand.Float64()))
}
func randint(ec *EvalCtx, low, high int) {
if low >= high {
throw(ErrArgs)
}
out := ec.ports[1].Chan
i := low + rand.Intn(high-low)
out <- String(strconv.Itoa(i))
}
func ord(ec *EvalCtx, s string) {
out := ec.ports[1].Chan
for _, r := range s {
out <- String(fmt.Sprintf("0x%x", r))
}
}
var ErrBadBase = errors.New("bad base")
func base(ec *EvalCtx, b int, nums ...int) {
if b < 2 || b > 36 {
throw(ErrBadBase)
}
out := ec.ports[1].Chan
for _, num := range nums {
out <- String(strconv.FormatInt(int64(num), b))
}
}
// rangeFn outputs a sequence of integers as strings. One argument gives an
// exclusive upper bound counting from 0; two give lower and upper bounds;
// an optional third argument gives the step, which must be positive.
func rangeFn(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var lower, upper int
	step := 1
	var err error
	switch len(args) {
	case 1:
		upper, err = toInt(args[0])
		maybeThrow(err)
	case 2, 3:
		lower, err = toInt(args[0])
		maybeThrow(err)
		upper, err = toInt(args[1])
		maybeThrow(err)
		if len(args) == 3 {
			step, err = toInt(args[2])
			maybeThrow(err)
		}
	default:
		throw(ErrArgs)
	}
	// A zero or negative step would make the loop below never terminate
	// (e.g. `range 0 10 0`); the original looped forever here.
	if step <= 0 {
		throw(ErrArgs)
	}
	out := ec.ports[1].Chan
	for i := lower; i < upper; i += step {
		out <- String(strconv.Itoa(i))
	}
}
func boolFn(ec *EvalCtx, v Value) {
out := ec.ports[1].Chan
out <- Bool(ToBool(v))
}
func is(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) < 2 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if args[i] != args[i+1] {
ec.falsify()
return
}
}
}
func eq(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
if len(args) < 2 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if !DeepEq(args[i], args[i+1]) {
ec.falsify()
return
}
}
}
func resolveFn(ec *EvalCtx, cmd String) {
out := ec.ports[1].Chan
out <- resolve(string(cmd), ec)
}
func take(ec *EvalCtx, n int, iterate func(func(Value))) {
out := ec.ports[1].Chan
i := 0
iterate(func(v Value) {
if i < n {
out <- v
}
i++
})
}
func count(ec *EvalCtx, args []Value, opts map[string]Value) {
TakeNoOpt(opts)
var n int
switch len(args) {
case 0:
// Count inputs.
ec.IterateInputs(func(Value) {
n++
})
case 1:
// Get length of argument.
v := args[0]
if lener, ok := v.(Lener); ok {
n = lener.Len()
} else if iterator, ok := v.(Iterator); ok {
iterator.Iterate(func(Value) bool {
n++
return true
})
} else {
throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
}
default:
throw(errors.New("want 0 or 1 argument"))
}
ec.ports[1].Chan <- String(strconv.Itoa(n))
}
func overrideWcwidth(ec *EvalCtx, s String, w int) {
r, err := toRune(s)
maybeThrow(err)
util.OverrideWcwidth(r, w)
}
func wcswidth(ec *EvalCtx, s String) {
out := ec.ports[1].Chan
out <- String(strconv.Itoa(util.Wcswidth(string(s))))
}
func fg(ec *EvalCtx, pids ...int) {
if len(pids) == 0 {
throw(ErrArgs)
}
var thepgid int
for i, pid := range pids {
pgid, err := syscall.Getpgid(pid)
maybeThrow(err)
if i == 0 {
thepgid = pgid
} else if pgid != thepgid {
throw(ErrNotInSameGroup)
}
}
err := sys.Tcsetpgrp(0, thepgid)
maybeThrow(err)
errors := make([]Error, len(pids))
for i, pid := range pids {
err := syscall.Kill(pid, syscall.SIGCONT)
if err != nil {
errors[i] = Error{err}
}
}
for i, pid := range pids {
if errors[i] != OK {
continue
}
var ws syscall.WaitStatus
_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
if err != nil {
errors[i] = Error{err}
} else {
// TODO find command name
errors[i] = Error{NewExternalCmdExit(fmt.Sprintf("(pid %d)", pid), ws, pid)}
}
}
throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func fopen(ec *EvalCtx, name string) {
// TODO support opening files for writing etc as well.
out := ec.ports[1].Chan
f, err := os.Open(name)
maybeThrow(err)
out <- File{f}
}
func pipe(ec *EvalCtx) {
r, w, err := os.Pipe()
out := ec.ports[1].Chan
maybeThrow(err)
out <- Pipe{r, w}
}
func fclose(ec *EvalCtx, f File) { maybeThrow(f.inner.Close()) }
func prclose(ec *EvalCtx, p Pipe) { maybeThrow(p.r.Close()) }
func pwclose(ec *EvalCtx, p Pipe) { maybeThrow(p.w.Close()) }
func sleep(ec *EvalCtx, t float64) {
d := time.Duration(float64(time.Second) * t)
select {
case <-ec.Interrupts():
throw(ErrInterrupted)
case <-time.After(d):
}
}
// _stack dumps the stack traces of all goroutines to the output file.
func _stack(ec *EvalCtx) {
	out := ec.ports[1].File
	// XXX dup with main.go
	// Grow the buffer until runtime.Stack no longer fills it completely,
	// then write only the n bytes actually produced. The previous version
	// wrote the entire buffer, including trailing NUL padding.
	buf := make([]byte, 1024)
	n := runtime.Stack(buf, true)
	for n == cap(buf) {
		buf = make([]byte, cap(buf)*2)
		n = runtime.Stack(buf, true)
	}
	out.Write(buf[:n])
}
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
// _time calls f with no arguments and no options, then prints the elapsed
// wall-clock time to the output file.
func _time(ec *EvalCtx, f FnValue) {
	start := time.Now()
	f.Call(ec, NoArgs, NoOpts)
	elapsed := time.Since(start)
	fmt.Fprintln(ec.ports[1].File, elapsed)
}
// _ifaddrs writes the string form of every interface address of the system
// to the output channel, throwing on lookup failure.
func _ifaddrs(ec *EvalCtx) {
	out := ec.ports[1].Chan
	addrs, err := net.InterfaceAddrs()
	maybeThrow(err)
	for _, addr := range addrs {
		out <- String(addr.String())
	}
}
func exec(ec *EvalCtx, args ...string) {
if len(args) == 0 {
args = []string{"elvish"}
}
var err error
args[0], err = ec.Search(args[0])
maybeThrow(err)
preExit(ec)
err = syscall.Exec(args[0], args, os.Environ())
maybeThrow(err)
}
func exit(ec *EvalCtx, args ...int) {
doexit := func(i int) {
preExit(ec)
os.Exit(i)
}
switch len(args) {
case 0:
doexit(0)
case 1:
doexit(args[0])
default:
throw(ErrArgs)
}
}
func preExit(ec *EvalCtx) {
err := ec.Store.Close()
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
}
|
package eval
// Builtin functions.
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/elves/elvish/parse"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value, map[string]Value)
}
var _ FnValue = &BuiltinFn{}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "<builtin " + b.Name + ">"
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value, opts map[string]Value) {
b.Impl(ec, args, opts)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{
&BuiltinFn{"true", nop},
&BuiltinFn{"false", falseFn},
&BuiltinFn{"print", WrapFn(print, OptSpec{"sep", String(" ")})},
&BuiltinFn{"echo", WrapFn(echo, OptSpec{"sep", String(" ")})},
&BuiltinFn{"pprint", pprint},
&BuiltinFn{"slurp", WrapFn(slurp)},
&BuiltinFn{"into-lines", WrapFn(intoLines)},
&BuiltinFn{"put", put},
&BuiltinFn{"unpack", WrapFn(unpack)},
&BuiltinFn{"joins", WrapFn(joins)},
&BuiltinFn{"splits", WrapFn(splits, OptSpec{"sep", String("")})},
&BuiltinFn{"has-prefix", WrapFn(hasPrefix)},
&BuiltinFn{"has-suffix", WrapFn(hasSuffix)},
&BuiltinFn{"<s",
wrapStrCompare(func(a, b string) bool { return a < b })},
&BuiltinFn{"<=s",
wrapStrCompare(func(a, b string) bool { return a <= b })},
&BuiltinFn{"==s",
wrapStrCompare(func(a, b string) bool { return a == b })},
&BuiltinFn{"!=s",
wrapStrCompare(func(a, b string) bool { return a != b })},
&BuiltinFn{">s",
wrapStrCompare(func(a, b string) bool { return a > b })},
&BuiltinFn{">=s",
wrapStrCompare(func(a, b string) bool { return a >= b })},
&BuiltinFn{"to-json", WrapFn(toJSON)},
&BuiltinFn{"from-json", WrapFn(fromJSON)},
&BuiltinFn{"kind-of", kindOf},
&BuiltinFn{"fail", WrapFn(fail)},
&BuiltinFn{"multi-error", WrapFn(multiErrorFn)},
&BuiltinFn{"return", WrapFn(returnFn)},
&BuiltinFn{"break", WrapFn(breakFn)},
&BuiltinFn{"continue", WrapFn(continueFn)},
&BuiltinFn{"each", WrapFn(each)},
&BuiltinFn{"peach", WrapFn(peach)},
&BuiltinFn{"eawk", WrapFn(eawk)},
&BuiltinFn{"constantly", constantly},
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", WrapFn(dirs)},
&BuiltinFn{"history", WrapFn(history)},
&BuiltinFn{"path-abs", wrapStringToStringError(filepath.Abs)},
&BuiltinFn{"path-base", wrapStringToString(filepath.Base)},
&BuiltinFn{"path-clean", wrapStringToString(filepath.Clean)},
&BuiltinFn{"path-dir", wrapStringToString(filepath.Dir)},
&BuiltinFn{"path-ext", wrapStringToString(filepath.Ext)},
&BuiltinFn{"eval-symlinks", wrapStringToStringError(filepath.EvalSymlinks)},
&BuiltinFn{"source", WrapFn(source)},
&BuiltinFn{"+", WrapFn(plus)},
&BuiltinFn{"-", WrapFn(minus)},
&BuiltinFn{"*", WrapFn(times)},
&BuiltinFn{"/", slash},
&BuiltinFn{"^", WrapFn(pow)},
&BuiltinFn{"<",
wrapNumCompare(func(a, b float64) bool { return a < b })},
&BuiltinFn{"<=",
wrapNumCompare(func(a, b float64) bool { return a <= b })},
&BuiltinFn{"==",
wrapNumCompare(func(a, b float64) bool { return a == b })},
&BuiltinFn{"!=",
wrapNumCompare(func(a, b float64) bool { return a != b })},
&BuiltinFn{">",
wrapNumCompare(func(a, b float64) bool { return a > b })},
&BuiltinFn{">=",
wrapNumCompare(func(a, b float64) bool { return a >= b })},
&BuiltinFn{"%", WrapFn(mod)},
&BuiltinFn{"rand", WrapFn(randFn)},
&BuiltinFn{"randint", WrapFn(randint)},
&BuiltinFn{"ord", WrapFn(ord)},
&BuiltinFn{"base", WrapFn(base)},
&BuiltinFn{"range", rangeFn},
&BuiltinFn{"bool", WrapFn(boolFn)},
&BuiltinFn{"is", is},
&BuiltinFn{"eq", eq},
&BuiltinFn{"resolve", WrapFn(resolveFn)},
&BuiltinFn{"take", WrapFn(take)},
&BuiltinFn{"count", count},
&BuiltinFn{"wcswidth", WrapFn(wcswidth)},
&BuiltinFn{"fg", WrapFn(fg)},
&BuiltinFn{"tilde-abbr", WrapFn(tildeAbbr)},
&BuiltinFn{"fopen", WrapFn(fopen)},
&BuiltinFn{"fclose", WrapFn(fclose)},
&BuiltinFn{"pipe", WrapFn(pipe)},
&BuiltinFn{"prclose", WrapFn(prclose)},
&BuiltinFn{"pwclose", WrapFn(pwclose)},
&BuiltinFn{"esleep", WrapFn(sleep)},
&BuiltinFn{"exec", WrapFn(exec)},
&BuiltinFn{"exit", WrapFn(exit)},
&BuiltinFn{"-stack", WrapFn(_stack)},
&BuiltinFn{"-log", WrapFn(_log)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
// For rand and randint.
rand.Seed(time.Now().UTC().UnixNano())
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
iterateType = reflect.TypeOf((func(func(Value)))(nil))
stringValueType = reflect.TypeOf(String(""))
)
// WrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature of
// the inner function and option specifications. The inner function must accept
// EvalCtx* as the first argument, followed by options, followed by arguments.
//
// If the inner function is not variadic and its last parameter has type
// func(func(Value)), the wrapped function accepts an optional trailing
// Iterator argument; when absent, the callback iterates the input instead.
func WrapFn(inner interface{}, optSpecs ...OptSpec) func(*EvalCtx, []Value, map[string]Value) {
	funcType := reflect.TypeOf(inner)
	if funcType.In(0) != evalCtxType {
		panic("bad func to wrap, first argument not *EvalCtx")
	}
	nopts := len(optSpecs)
	// Options occupy parameter positions [1, optsTo) of inner.
	optsTo := nopts + 1
	optSet := NewOptSet(optSpecs...)
	// Range occupied by fixed arguments in the argument list to inner.
	fixedArgsFrom, fixedArgsTo := optsTo, funcType.NumIn()
	isVariadic := funcType.IsVariadic()
	hasOptionalIterate := false
	var variadicType reflect.Type
	if isVariadic {
		// The variadic parameter is not counted as a fixed argument.
		fixedArgsTo--
		variadicType = funcType.In(funcType.NumIn() - 1).Elem()
		if !supportedArgType(variadicType) {
			panic(fmt.Sprintf("bad func to wrap, variadic argument type %s unsupported", variadicType))
		}
	} else if funcType.In(funcType.NumIn()-1) == iterateType {
		// Trailing iterate callback, filled in at call time.
		fixedArgsTo--
		hasOptionalIterate = true
	}
	for i := 1; i < fixedArgsTo; i++ {
		if !supportedArgType(funcType.In(i)) {
			panic(fmt.Sprintf("bad func to wrap, argument type %s unsupported", funcType.In(i)))
		}
	}
	nFixedArgs := fixedArgsTo - fixedArgsFrom
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		// Check arity of arguments.
		if isVariadic {
			if len(args) < nFixedArgs {
				throw(fmt.Errorf("arity mismatch: want %d or more arguments, got %d", nFixedArgs, len(args)))
			}
		} else if hasOptionalIterate {
			if len(args) < nFixedArgs || len(args) > nFixedArgs+1 {
				throw(fmt.Errorf("arity mismatch: want %d or %d arguments, got %d", nFixedArgs, nFixedArgs+1, len(args)))
			}
		} else if len(args) != nFixedArgs {
			throw(fmt.Errorf("arity mismatch: want %d arguments, got %d", nFixedArgs, len(args)))
		}
		convertedArgs := make([]reflect.Value, 1+nopts+len(args))
		convertedArgs[0] = reflect.ValueOf(ec)
		// Convert and fill options.
		var err error
		optValues := optSet.MustPick(opts)
		for i, v := range optValues {
			convertedArgs[1+i], err = convertArg(v, funcType.In(1+i))
			if err != nil {
				throw(errors.New("bad option " + parse.Quote(optSet.optSpecs[i].Name) + ": " + err.Error()))
			}
		}
		// Convert and fill fixed arguments.
		for i, arg := range args[:nFixedArgs] {
			convertedArgs[fixedArgsFrom+i], err = convertArg(arg, funcType.In(fixedArgsFrom+i))
			if err != nil {
				throw(errors.New("bad argument: " + err.Error()))
			}
		}
		if isVariadic {
			for i, arg := range args[nFixedArgs:] {
				convertedArgs[fixedArgsTo+i], err = convertArg(arg, variadicType)
				if err != nil {
					throw(errors.New("bad argument: " + err.Error()))
				}
			}
		} else if hasOptionalIterate {
			var iterate func(func(Value))
			if len(args) == nFixedArgs {
				// No Iterator specified in arguments. Use input.
				// Since convertedArgs was created according to the size of the
				// actual argument list, we now append an empty element to make
				// room for this additional iterator argument.
				convertedArgs = append(convertedArgs, reflect.Value{})
				iterate = ec.IterateInputs
			} else {
				iterator, ok := args[nFixedArgs].(Iterator)
				if !ok {
					throw(errors.New("bad argument: need iterator, got " + args[nFixedArgs].Kind()))
				}
				// Adapt the Iterator (whose callback returns a continue flag)
				// to the simple iterate signature inner expects.
				iterate = func(f func(Value)) {
					iterator.Iterate(func(v Value) bool {
						f(v)
						return true
					})
				}
			}
			convertedArgs[fixedArgsTo] = reflect.ValueOf(iterate)
		}
		reflect.ValueOf(inner).Call(convertedArgs)
	}
}
// supportedArgType reports whether t may appear as a parameter type of a
// wrapped builtin: string, int, float64, or any type implementing Value.
func supportedArgType(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.String, reflect.Int, reflect.Float64:
		return true
	}
	return t.Implements(valueType)
}
// convertArg converts a Value into a reflect.Value of wantType, for use as an
// argument when calling a wrapped builtin. String and numeric targets are
// produced from the argument's string form; anything else is passed through.
func convertArg(arg Value, wantType reflect.Type) (reflect.Value, error) {
	var converted interface{}
	var err error
	switch wantType.Kind() {
	case reflect.String:
		if wantType == stringValueType {
			// Target is the Value-implementing String type.
			converted = String(ToString(arg))
		} else {
			converted = ToString(arg)
		}
	case reflect.Int:
		converted, err = toInt(arg)
	case reflect.Float64:
		converted, err = toFloat(arg)
	default:
		// NOTE(review): the value is passed through without an actual
		// reflect conversion. This presumably relies on wantType being an
		// interface arg already implements (per supportedArgType) — confirm
		// before adding concrete target types here.
		if reflect.TypeOf(arg).ConvertibleTo(wantType) {
			converted = arg
		} else {
			err = fmt.Errorf("need %s", wantType.Name())
		}
	}
	return reflect.ValueOf(converted), err
}
// wrapStringToString adapts a pure string function into a builtin that takes
// exactly one string argument and writes the result to the output channel.
func wrapStringToString(f func(string) string) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		result := f(mustGetOneString(args))
		ec.ports[1].Chan <- String(result)
	}
}
// wrapStringToStringError adapts a fallible string function into a builtin
// taking exactly one string argument; a non-nil error is thrown, otherwise
// the result is written to the output channel.
func wrapStringToStringError(f func(string) (string, error)) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		result, err := f(mustGetOneString(args))
		maybeThrow(err)
		ec.ports[1].Chan <- String(result)
	}
}
// wrapStrCompare turns a binary string predicate into a variadic builtin that
// falsifies the context unless the predicate holds for every adjacent pair of
// arguments. All arguments must be Strings; fewer than two is an error.
func wrapStrCompare(cmp func(a, b string) bool) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		if len(args) < 2 {
			throw(ErrArgs)
		}
		// Validate and unwrap all arguments before comparing any of them.
		strs := make([]string, len(args))
		for i, a := range args {
			s, ok := a.(String)
			if !ok {
				throw(ErrArgs)
			}
			strs[i] = string(s)
		}
		for i := 1; i < len(strs); i++ {
			if !cmp(strs[i-1], strs[i]) {
				ec.falsify()
				return
			}
		}
	}
}
// wrapNumCompare turns a binary float64 predicate into a variadic builtin
// that parses every argument as a number and falsifies the context unless
// the predicate holds for every adjacent pair.
func wrapNumCompare(cmp func(a, b float64) bool) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		if len(args) < 2 {
			throw(ErrArgs)
		}
		nums := make([]float64, len(args))
		for i, a := range args {
			num, err := toFloat(a)
			maybeThrow(err)
			nums[i] = num
		}
		for i := 1; i < len(nums); i++ {
			if !cmp(nums[i-1], nums[i]) {
				ec.falsify()
				return
			}
		}
	}
}
var errMustBeOneString = errors.New("must be one string argument")
// mustGetOneString extracts the single String argument from args, throwing
// errMustBeOneString for any other argument count or type.
func mustGetOneString(args []Value) string {
	if len(args) != 1 {
		throw(errMustBeOneString)
	}
	str, isString := args[0].(String)
	if !isString {
		throw(errMustBeOneString)
	}
	return string(str)
}
func nop(ec *EvalCtx, args []Value, opts map[string]Value) {
}
func falseFn(ec *EvalCtx, args []Value, opts map[string]Value) {
ec.falsify()
}
// put writes each of its arguments to the output channel, unchanged.
func put(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	ch := ec.ports[1].Chan
	for _, v := range args {
		ch <- v
	}
}
// kindOf writes the kind (type name) of each argument to the output channel.
func kindOf(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	ch := ec.ports[1].Chan
	for _, v := range args {
		ch <- String(v.Kind())
	}
}
func fail(ec *EvalCtx, arg Value) {
throw(errors.New(ToString(arg)))
}
func multiErrorFn(ec *EvalCtx, args ...Error) {
throw(MultiError{args})
}
func returnFn(ec *EvalCtx) {
throw(Return)
}
func breakFn(ec *EvalCtx) {
throw(Break)
}
func continueFn(ec *EvalCtx) {
throw(Continue)
}
// print writes its arguments to the output file, separated by the sep option
// and with no trailing newline.
func print(ec *EvalCtx, sepv String, args ...string) {
	file := ec.ports[1].File
	for i, arg := range args {
		if i != 0 {
			file.WriteString(string(sepv))
		}
		file.WriteString(arg)
	}
}
func echo(ec *EvalCtx, sep String, args ...string) {
print(ec, sep, args...)
ec.ports[1].File.WriteString("\n")
}
// pprint writes the Repr of each argument to the output file, one per line.
func pprint(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	file := ec.ports[1].File
	for _, v := range args {
		file.WriteString(v.Repr(0) + "\n")
	}
}
// slurp reads the input file until EOF and writes its entire contents to the
// output channel as a single string. A read error is thrown.
func slurp(ec *EvalCtx) {
	in := ec.ports[0].File
	out := ec.ports[1].Chan
	all, err := ioutil.ReadAll(in)
	// The previous version printed nonblock/stdin diagnostics to the
	// process's stdout on error; that debug scaffolding is removed — the
	// error itself is thrown below.
	maybeThrow(err)
	out <- String(string(all))
}
func intoLines(ec *EvalCtx, iterate func(func(Value))) {
out := ec.ports[1].File
iterate(func(v Value) {
fmt.Fprintln(out, ToString(v))
})
}
// unpack takes Iterators from the iterated values and writes each of their
// elements to the output channel. Non-iterator values are an error.
func unpack(ec *EvalCtx, iterate func(func(Value))) {
	out := ec.ports[1].Chan
	iterate(func(v Value) {
		iterator, ok := v.(Iterator)
		if !ok {
			throwf("unpack wants iterator in input, got %s", v.Kind())
		}
		iterator.Iterate(func(v Value) bool {
			out <- v
			// Always continue to the next element.
			return true
		})
	})
}
// joins joins all input strings with a delimiter and writes the result to
// the output channel. Non-string input is an error.
func joins(ec *EvalCtx, sep String, iterate func(func(Value))) {
	var buf bytes.Buffer
	// Track position explicitly. The previous version tested buf.Len() > 0,
	// which dropped the separator after a leading empty string.
	first := true
	iterate(func(v Value) {
		s, ok := v.(String)
		if !ok {
			throwf("join wants string input, got %s", v.Kind())
		}
		if !first {
			buf.WriteString(string(sep))
		}
		first = false
		buf.WriteString(string(s))
	})
	out := ec.ports[1].Chan
	out <- String(buf.String())
}
// splits splits the argument string around each occurrence of the sep option
// and writes every piece to the output channel.
func splits(ec *EvalCtx, sep, s String) {
	ch := ec.ports[1].Chan
	for _, piece := range strings.Split(string(s), string(sep)) {
		ch <- String(piece)
	}
}
func hasPrefix(ec *EvalCtx, s, prefix String) {
if !strings.HasPrefix(string(s), string(prefix)) {
ec.falsify()
}
}
func hasSuffix(ec *EvalCtx, s, suffix String) {
if !strings.HasSuffix(string(s), string(suffix)) {
ec.falsify()
}
}
// toJSON converts a stream of Value's to JSON data.
func toJSON(ec *EvalCtx, iterate func(func(Value))) {
	out := ec.ports[1].File
	enc := json.NewEncoder(out)
	iterate(func(v Value) {
		// Each value becomes one newline-terminated JSON document; encoding
		// failures are thrown.
		err := enc.Encode(v)
		maybeThrow(err)
	})
}
// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
	in := ec.ports[0].File
	out := ec.ports[1].Chan
	dec := json.NewDecoder(in)
	var v interface{}
	for {
		err := dec.Decode(&v)
		if err != nil {
			// EOF cleanly ends the stream; any other decode error is thrown.
			if err == io.EOF {
				return
			}
			throw(err)
		}
		out <- FromJSONInterface(v)
	}
}
// each takes a single closure and applies it to all input values.
func each(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	// After the closure throws Break, remaining input is still consumed but
	// the closure is no longer called.
	broken := false
	iterate(func(v Value) {
		if broken {
			return
		}
		// NOTE We don't have the position range of the closure in the source.
		// Ideally, it should be kept in the Closure itself.
		newec := ec.fork("closure of each")
		newec.ports[0] = NullClosedInput
		ex := newec.PCall(f, []Value{v}, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop: keep iterating.
		case Break:
			broken = true
		default:
			// Propagate any other exception to the caller.
			throw(ex)
		}
	})
}
// peach takes a single closure and applies it to all input values in
// parallel, one goroutine per value. Break stops the launch of further
// goroutines; the first non-control exception is rethrown after all
// goroutines finish.
func peach(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	var w sync.WaitGroup
	// mu guards broken and err, which were previously written by the worker
	// goroutines and read by the iterate callback with no synchronization —
	// a data race.
	var mu sync.Mutex
	broken := false
	var err error
	iterate(func(v Value) {
		mu.Lock()
		stop := broken || err != nil
		mu.Unlock()
		if stop {
			return
		}
		w.Add(1)
		go func() {
			defer w.Done()
			// NOTE We don't have the position range of the closure in the source.
			// Ideally, it should be kept in the Closure itself.
			newec := ec.fork("closure of peach")
			newec.ports[0] = NullClosedInput
			ex := newec.PCall(f, []Value{v}, NoOpts)
			ClosePorts(newec.ports)
			switch ex {
			case nil, Continue:
				// nop
			case Break:
				mu.Lock()
				broken = true
				mu.Unlock()
			default:
				mu.Lock()
				err = ex
				mu.Unlock()
			}
		}()
	})
	w.Wait()
	maybeThrow(err)
}
var eawkWordSep = regexp.MustCompile("[ \t]+")
// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	broken := false
	iterate(func(v Value) {
		// After break, remaining input is drained without calling f.
		if broken {
			return
		}
		line, ok := v.(String)
		if !ok {
			// eawk operates on string input only.
			throw(ErrInput)
		}
		// First argument is the whole line, followed by its fields split on
		// runs of spaces/tabs after trimming.
		args := []Value{line}
		for _, field := range eawkWordSep.Split(strings.Trim(string(line), " \t"), -1) {
			args = append(args, String(field))
		}
		newec := ec.fork("fn of eawk")
		// TODO: Close port 0 of newec.
		ex := newec.PCall(f, args, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop
		case Break:
			broken = true
		default:
			throw(ex)
		}
	})
}
// constantly writes a function value that, when later called with no
// arguments, writes back the arguments constantly itself was called with.
func constantly(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	out := ec.ports[1].Chan
	// XXX Repr of this fn is not right
	out <- &BuiltinFn{
		"created by constantly",
		func(ec *EvalCtx, a []Value, o map[string]Value) {
			TakeNoOpt(o)
			if len(a) != 0 {
				throw(ErrArgs)
			}
			// args is captured from the enclosing constantly call.
			out := ec.ports[1].Chan
			for _, v := range args {
				out <- v
			}
		},
	}
}
// cd changes the working directory: with no arguments to the home directory
// (via mustGetHome), with one argument to that directory. More arguments are
// an error.
func cd(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var dir string
	if len(args) == 0 {
		dir = mustGetHome("")
	} else if len(args) == 1 {
		dir = ToString(args[0])
	} else {
		throw(ErrArgs)
	}
	cdInner(dir, ec)
}
// cdInner changes the working directory to dir, throwing on failure, and —
// when a store is connected — records the new directory asynchronously.
func cdInner(dir string, ec *EvalCtx) {
	err := os.Chdir(dir)
	if err != nil {
		throw(err)
	}
	if ec.Store != nil {
		// XXX Error ignored.
		pwd, err := os.Getwd()
		if err == nil {
			store := ec.Store
			// Register with the WaitGroup before launching the goroutine.
			// Calling Add inside the goroutine races with any concurrent
			// Waits.Wait(), which could return before this work is counted.
			store.Waits.Add(1)
			go func() {
				// XXX Error ignored.
				store.AddDir(pwd, 1)
				store.Waits.Done()
				Logger.Println("added dir to store:", pwd)
			}()
		}
	}
}
var dirFieldNames = []string{"path", "score"}
// dirs writes every directory recorded in the store to the output channel,
// as a struct with "path" and "score" fields. Requires a connected store.
func dirs(ec *EvalCtx) {
	if ec.Store == nil {
		throw(ErrStoreNotConnected)
	}
	dirs, err := ec.Store.ListDirs()
	if err != nil {
		throw(errors.New("store error: " + err.Error()))
	}
	out := ec.ports[1].Chan
	for _, dir := range dirs {
		// Field order must match dirFieldNames ("path", "score").
		out <- &Struct{dirFieldNames, []Variable{
			NewRoVariable(String(dir.Path)),
			NewRoVariable(String(fmt.Sprint(dir.Score))),
		}}
	}
}
// history writes every stored command, from the beginning of history up to
// the next command sequence number, to the output channel. Requires a
// connected store.
func history(ec *EvalCtx) {
	if ec.Store == nil {
		throw(ErrStoreNotConnected)
	}
	st := ec.Store
	seq, err := st.NextCmdSeq()
	maybeThrow(err)
	cmds, err := st.Cmds(0, seq)
	maybeThrow(err)
	ch := ec.ports[1].Chan
	for _, c := range cmds {
		ch <- String(c)
	}
}
func pathAbs(ec *EvalCtx, fname string) {
out := ec.ports[1].Chan
absname, err := filepath.Abs(fname)
maybeThrow(err)
out <- String(absname)
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
// toFloat converts a Value to a float64. Only String values are accepted;
// any other kind, or an unparseable string, yields an error.
func toFloat(arg Value) (float64, error) {
	// Bind the checked assertion directly; the previous version re-asserted
	// arg.(String) a second time after the check.
	s, ok := arg.(String)
	if !ok {
		return 0, fmt.Errorf("must be string")
	}
	num, err := strconv.ParseFloat(string(s), 64)
	if err != nil {
		return 0, err
	}
	return num, nil
}
// toInt converts a Value to an int. Only String values are accepted; any
// other kind, or an unparseable string, yields an error.
func toInt(arg Value) (int, error) {
	// Bind the checked assertion directly; the previous version re-asserted
	// arg.(String) a second time after the check.
	s, ok := arg.(String)
	if !ok {
		return 0, fmt.Errorf("must be string")
	}
	num, err := strconv.Atoi(string(s))
	if err != nil {
		return 0, err
	}
	return num, nil
}
// plus writes the sum of its arguments (0 with no arguments) in %g form.
func plus(ec *EvalCtx, nums ...float64) {
	var sum float64
	for _, n := range nums {
		sum += n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", sum))
}
// minus subtracts each of nums from sum and writes the result; with a single
// argument it performs unary negation instead.
func minus(ec *EvalCtx, sum float64, nums ...float64) {
	if len(nums) == 0 {
		// Unary -
		sum = -sum
	}
	// No-op when nums is empty, so the unary case falls through safely.
	for _, n := range nums {
		sum -= n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", sum))
}
// times writes the product of its arguments (1 with no arguments) in %g form.
func times(ec *EvalCtx, nums ...float64) {
	product := 1.0
	for _, n := range nums {
		product *= n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", product))
}
// slash implements the / builtin: with no arguments it changes to the root
// directory, otherwise it performs floating-point division via divide.
func slash(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	if len(args) == 0 {
		// cd /
		cdInner("/", ec)
		return
	}
	// Division
	wrappedDivide(ec, args, opts)
}
var wrappedDivide = WrapFn(divide)
// divide folds division left to right: prod / nums[0] / nums[1] / ... and
// writes the quotient in %g form.
func divide(ec *EvalCtx, prod float64, nums ...float64) {
	quotient := prod
	for _, n := range nums {
		quotient /= n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", quotient))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
func mod(ec *EvalCtx, a, b int) {
out := ec.ports[1].Chan
out <- String(strconv.Itoa(a % b))
}
func randFn(ec *EvalCtx) {
out := ec.ports[1].Chan
out <- String(fmt.Sprint(rand.Float64()))
}
// randint writes a pseudo-random integer in [low, high); throws unless
// low < high.
func randint(ec *EvalCtx, low, high int) {
	if high <= low {
		throw(ErrArgs)
	}
	n := rand.Intn(high-low) + low
	ec.ports[1].Chan <- String(strconv.Itoa(n))
}
func ord(ec *EvalCtx, s string) {
out := ec.ports[1].Chan
for _, r := range s {
out <- String(fmt.Sprintf("0x%x", r))
}
}
var ErrBadBase = errors.New("bad base")
// base writes each of nums formatted in base b; b must lie in [2, 36].
func base(ec *EvalCtx, b int, nums ...int) {
	if b < 2 || b > 36 {
		throw(ErrBadBase)
	}
	ch := ec.ports[1].Chan
	for _, n := range nums {
		ch <- String(strconv.FormatInt(int64(n), b))
	}
}
// rangeFn implements range: with one argument [0, end), with two
// [start, end), with three [start, end) advancing by step. Each integer is
// written to the output channel as a string.
func rangeFn(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var lower, upper int
	step := 1
	var err error
	switch len(args) {
	case 1:
		upper, err = toInt(args[0])
		maybeThrow(err)
	case 2, 3:
		lower, err = toInt(args[0])
		maybeThrow(err)
		upper, err = toInt(args[1])
		maybeThrow(err)
		if len(args) == 3 {
			step, err = toInt(args[2])
			maybeThrow(err)
		}
	default:
		throw(ErrArgs)
	}
	// Reject non-positive steps: step == 0 would loop forever, and a
	// negative step moves away from upper, also never terminating when
	// lower < upper.
	if step <= 0 {
		throw(ErrArgs)
	}
	out := ec.ports[1].Chan
	for i := lower; i < upper; i += step {
		out <- String(strconv.Itoa(i))
	}
}
func boolFn(ec *EvalCtx, v Value) {
out := ec.ports[1].Chan
out <- Bool(ToBool(v))
}
// is falsifies the context unless every adjacent pair of arguments is
// identical under Go's == (identity, not deep equality; compare eq). Fewer
// than two arguments is an error.
func is(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	if len(args) < 2 {
		throw(ErrArgs)
	}
	for i := 0; i+1 < len(args); i++ {
		if args[i] != args[i+1] {
			ec.falsify()
			return
		}
	}
}
// eq falsifies the context unless every adjacent pair of arguments is equal
// under DeepEq (structural equality; compare is). Fewer than two arguments
// is an error.
func eq(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	if len(args) < 2 {
		throw(ErrArgs)
	}
	for i := 0; i+1 < len(args); i++ {
		if !DeepEq(args[i], args[i+1]) {
			ec.falsify()
			return
		}
	}
}
func resolveFn(ec *EvalCtx, cmd String) {
out := ec.ports[1].Chan
out <- resolve(string(cmd), ec)
}
// take forwards only the first n iterated values to the output channel; the
// remainder are consumed but discarded.
func take(ec *EvalCtx, n int, iterate func(func(Value))) {
	out := ec.ports[1].Chan
	seen := 0
	iterate(func(v Value) {
		if seen < n {
			out <- v
		}
		seen++
	})
}
// count writes a count to the output channel: with no arguments, the number
// of input values; with one argument, that value's length.
func count(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var n int
	switch len(args) {
	case 0:
		// Count inputs.
		ec.IterateInputs(func(Value) {
			n++
		})
	case 1:
		// Get length of argument.
		v := args[0]
		if lener, ok := v.(Lener); ok {
			// Fast path for values that know their own length.
			n = lener.Len()
		} else if iterator, ok := v.(Iterator); ok {
			// Fall back to counting elements one by one.
			iterator.Iterate(func(Value) bool {
				n++
				return true
			})
		} else {
			throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
		}
	default:
		throw(errors.New("want 0 or 1 argument"))
	}
	ec.ports[1].Chan <- String(strconv.Itoa(n))
}
func wcswidth(ec *EvalCtx, s String) {
out := ec.ports[1].Chan
out <- String(strconv.Itoa(util.Wcswidth(string(s))))
}
// fg brings the job of the given pids to the foreground: the pids must all
// belong to one process group, which is made the terminal's foreground
// group, sent SIGCONT, and waited for. Per-process results are aggregated
// into a composite error.
func fg(ec *EvalCtx, pids ...int) {
	if len(pids) == 0 {
		throw(ErrArgs)
	}
	var thepgid int
	for i, pid := range pids {
		pgid, err := syscall.Getpgid(pid)
		maybeThrow(err)
		if i == 0 {
			thepgid = pgid
		} else if pgid != thepgid {
			// All pids must share a single process group.
			throw(ErrNotInSameGroup)
		}
	}
	// Make the group the foreground process group of fd 0 (the terminal).
	err := sys.Tcsetpgrp(0, thepgid)
	maybeThrow(err)
	// Resume each process, recording per-pid failures instead of aborting.
	errors := make([]Error, len(pids))
	for i, pid := range pids {
		err := syscall.Kill(pid, syscall.SIGCONT)
		if err != nil {
			errors[i] = Error{err}
		}
	}
	// Wait for each successfully resumed process; WUNTRACED also reports
	// processes that stop rather than exit.
	for i, pid := range pids {
		if errors[i] != OK {
			continue
		}
		var ws syscall.WaitStatus
		_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
		if err != nil {
			errors[i] = Error{err}
		} else {
			// TODO find command name
			errors[i] = Error{NewExternalCmdExit(fmt.Sprintf("(pid %d)", pid), ws, pid)}
		}
	}
	throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func fopen(ec *EvalCtx, name string) {
// TODO support opening files for writing etc as well.
out := ec.ports[1].Chan
f, err := os.Open(name)
maybeThrow(err)
out <- File{f}
}
// pipe creates an OS pipe and writes it, wrapped as a Pipe value, to the
// output channel. A creation failure is thrown.
func pipe(ec *EvalCtx) {
	r, w, err := os.Pipe()
	maybeThrow(err)
	ec.ports[1].Chan <- Pipe{r, w}
}
func fclose(ec *EvalCtx, f File) { maybeThrow(f.inner.Close()) }
func prclose(ec *EvalCtx, p Pipe) { maybeThrow(p.r.Close()) }
func pwclose(ec *EvalCtx, p Pipe) { maybeThrow(p.w.Close()) }
// sleep pauses for t seconds, or until the context is interrupted, in which
// case ErrInterrupted is thrown.
func sleep(ec *EvalCtx, t float64) {
	d := time.Duration(float64(time.Second) * t)
	select {
	case <-ec.Interrupts():
		throw(ErrInterrupted)
	case <-time.After(d):
	}
}
// _stack dumps the stack traces of all goroutines to the output file.
func _stack(ec *EvalCtx) {
	out := ec.ports[1].File
	// XXX dup with main.go
	// Grow the buffer until runtime.Stack no longer fills it completely,
	// then write only the n bytes actually produced. The previous version
	// wrote the entire buffer, including trailing NUL padding.
	buf := make([]byte, 1024)
	n := runtime.Stack(buf, true)
	for n == cap(buf) {
		buf = make([]byte, cap(buf)*2)
		n = runtime.Stack(buf, true)
	}
	out.Write(buf[:n])
}
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
// exec replaces the current process image with the given command, defaulting
// to "elvish" when no arguments are given. The command is resolved via
// ec.Search and cleanup runs before the image is replaced.
func exec(ec *EvalCtx, args ...string) {
	if len(args) == 0 {
		// With no arguments, re-exec elvish itself.
		args = []string{"elvish"}
	}
	var err error
	args[0], err = ec.Search(args[0])
	maybeThrow(err)
	// Close the store etc. before this process ceases to exist.
	preExit(ec)
	err = syscall.Exec(args[0], args, os.Environ())
	// Only reached if Exec itself failed.
	maybeThrow(err)
}
// exit terminates the process with the given status, defaulting to 0. At
// most one status argument is accepted.
func exit(ec *EvalCtx, args ...int) {
	// doexit runs cleanup, then terminates with status i.
	doexit := func(i int) {
		preExit(ec)
		os.Exit(i)
	}
	switch len(args) {
	case 0:
		doexit(0)
	case 1:
		doexit(args[0])
	default:
		throw(ErrArgs)
	}
}
// preExit performs cleanup that must happen before the process goes away:
// closing the store (logging, not throwing, any error) and terminating the
// stub if one exists.
func preExit(ec *EvalCtx) {
	err := ec.Store.Close()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	if ec.Stub != nil {
		ec.Stub.Terminate()
	}
}
Change unpack to work on an argument instead of the input. This fixes #254.
package eval
// Builtin functions.
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/elves/elvish/parse"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value, map[string]Value)
}
var _ FnValue = &BuiltinFn{}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "<builtin " + b.Name + ">"
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value, opts map[string]Value) {
b.Impl(ec, args, opts)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{
&BuiltinFn{"true", nop},
&BuiltinFn{"false", falseFn},
&BuiltinFn{"print", WrapFn(print, OptSpec{"sep", String(" ")})},
&BuiltinFn{"echo", WrapFn(echo, OptSpec{"sep", String(" ")})},
&BuiltinFn{"pprint", pprint},
&BuiltinFn{"slurp", WrapFn(slurp)},
&BuiltinFn{"into-lines", WrapFn(intoLines)},
&BuiltinFn{"put", put},
&BuiltinFn{"unpack", WrapFn(unpack)},
&BuiltinFn{"joins", WrapFn(joins)},
&BuiltinFn{"splits", WrapFn(splits, OptSpec{"sep", String("")})},
&BuiltinFn{"has-prefix", WrapFn(hasPrefix)},
&BuiltinFn{"has-suffix", WrapFn(hasSuffix)},
&BuiltinFn{"<s",
wrapStrCompare(func(a, b string) bool { return a < b })},
&BuiltinFn{"<=s",
wrapStrCompare(func(a, b string) bool { return a <= b })},
&BuiltinFn{"==s",
wrapStrCompare(func(a, b string) bool { return a == b })},
&BuiltinFn{"!=s",
wrapStrCompare(func(a, b string) bool { return a != b })},
&BuiltinFn{">s",
wrapStrCompare(func(a, b string) bool { return a > b })},
&BuiltinFn{">=s",
wrapStrCompare(func(a, b string) bool { return a >= b })},
&BuiltinFn{"to-json", WrapFn(toJSON)},
&BuiltinFn{"from-json", WrapFn(fromJSON)},
&BuiltinFn{"kind-of", kindOf},
&BuiltinFn{"fail", WrapFn(fail)},
&BuiltinFn{"multi-error", WrapFn(multiErrorFn)},
&BuiltinFn{"return", WrapFn(returnFn)},
&BuiltinFn{"break", WrapFn(breakFn)},
&BuiltinFn{"continue", WrapFn(continueFn)},
&BuiltinFn{"each", WrapFn(each)},
&BuiltinFn{"peach", WrapFn(peach)},
&BuiltinFn{"eawk", WrapFn(eawk)},
&BuiltinFn{"constantly", constantly},
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", WrapFn(dirs)},
&BuiltinFn{"history", WrapFn(history)},
&BuiltinFn{"path-abs", wrapStringToStringError(filepath.Abs)},
&BuiltinFn{"path-base", wrapStringToString(filepath.Base)},
&BuiltinFn{"path-clean", wrapStringToString(filepath.Clean)},
&BuiltinFn{"path-dir", wrapStringToString(filepath.Dir)},
&BuiltinFn{"path-ext", wrapStringToString(filepath.Ext)},
&BuiltinFn{"eval-symlinks", wrapStringToStringError(filepath.EvalSymlinks)},
&BuiltinFn{"source", WrapFn(source)},
&BuiltinFn{"+", WrapFn(plus)},
&BuiltinFn{"-", WrapFn(minus)},
&BuiltinFn{"*", WrapFn(times)},
&BuiltinFn{"/", slash},
&BuiltinFn{"^", WrapFn(pow)},
&BuiltinFn{"<",
wrapNumCompare(func(a, b float64) bool { return a < b })},
&BuiltinFn{"<=",
wrapNumCompare(func(a, b float64) bool { return a <= b })},
&BuiltinFn{"==",
wrapNumCompare(func(a, b float64) bool { return a == b })},
&BuiltinFn{"!=",
wrapNumCompare(func(a, b float64) bool { return a != b })},
&BuiltinFn{">",
wrapNumCompare(func(a, b float64) bool { return a > b })},
&BuiltinFn{">=",
wrapNumCompare(func(a, b float64) bool { return a >= b })},
&BuiltinFn{"%", WrapFn(mod)},
&BuiltinFn{"rand", WrapFn(randFn)},
&BuiltinFn{"randint", WrapFn(randint)},
&BuiltinFn{"ord", WrapFn(ord)},
&BuiltinFn{"base", WrapFn(base)},
&BuiltinFn{"range", rangeFn},
&BuiltinFn{"bool", WrapFn(boolFn)},
&BuiltinFn{"is", is},
&BuiltinFn{"eq", eq},
&BuiltinFn{"resolve", WrapFn(resolveFn)},
&BuiltinFn{"take", WrapFn(take)},
&BuiltinFn{"count", count},
&BuiltinFn{"wcswidth", WrapFn(wcswidth)},
&BuiltinFn{"fg", WrapFn(fg)},
&BuiltinFn{"tilde-abbr", WrapFn(tildeAbbr)},
&BuiltinFn{"fopen", WrapFn(fopen)},
&BuiltinFn{"fclose", WrapFn(fclose)},
&BuiltinFn{"pipe", WrapFn(pipe)},
&BuiltinFn{"prclose", WrapFn(prclose)},
&BuiltinFn{"pwclose", WrapFn(pwclose)},
&BuiltinFn{"esleep", WrapFn(sleep)},
&BuiltinFn{"exec", WrapFn(exec)},
&BuiltinFn{"exit", WrapFn(exit)},
&BuiltinFn{"-stack", WrapFn(_stack)},
&BuiltinFn{"-log", WrapFn(_log)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
// For rand and randint.
rand.Seed(time.Now().UTC().UnixNano())
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
iterateType = reflect.TypeOf((func(func(Value)))(nil))
stringValueType = reflect.TypeOf(String(""))
)
// WrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature of
// the inner function and option specifications. The inner function must accept
// *EvalCtx as the first argument, followed by options, followed by arguments.
// A trailing variadic parameter accepts any number of extra arguments; a
// trailing func(func(Value)) parameter makes the last argument an optional
// iterator, defaulting to iterating the input port when omitted.
func WrapFn(inner interface{}, optSpecs ...OptSpec) func(*EvalCtx, []Value, map[string]Value) {
	funcType := reflect.TypeOf(inner)
	if funcType.In(0) != evalCtxType {
		panic("bad func to wrap, first argument not *EvalCtx")
	}
	nopts := len(optSpecs)
	// Parameters 1..nopts of inner are options; optsTo is the index of the
	// first non-option parameter.
	optsTo := nopts + 1
	optSet := NewOptSet(optSpecs...)
	// Range occupied by fixed arguments in the argument list to inner.
	fixedArgsFrom, fixedArgsTo := optsTo, funcType.NumIn()
	isVariadic := funcType.IsVariadic()
	hasOptionalIterate := false
	var variadicType reflect.Type
	if isVariadic {
		// The last parameter is ...T; fixed arguments stop before it.
		fixedArgsTo--
		variadicType = funcType.In(funcType.NumIn() - 1).Elem()
		if !supportedArgType(variadicType) {
			panic(fmt.Sprintf("bad func to wrap, variadic argument type %s unsupported", variadicType))
		}
	} else if funcType.In(funcType.NumIn()-1) == iterateType {
		// Trailing func(func(Value)) parameter: optional iterator argument.
		fixedArgsTo--
		hasOptionalIterate = true
	}
	// Validate option and fixed-argument parameter types.
	for i := 1; i < fixedArgsTo; i++ {
		if !supportedArgType(funcType.In(i)) {
			panic(fmt.Sprintf("bad func to wrap, argument type %s unsupported", funcType.In(i)))
		}
	}
	nFixedArgs := fixedArgsTo - fixedArgsFrom
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		// Check arity of arguments.
		if isVariadic {
			if len(args) < nFixedArgs {
				throw(fmt.Errorf("arity mismatch: want %d or more arguments, got %d", nFixedArgs, len(args)))
			}
		} else if hasOptionalIterate {
			if len(args) < nFixedArgs || len(args) > nFixedArgs+1 {
				throw(fmt.Errorf("arity mismatch: want %d or %d arguments, got %d", nFixedArgs, nFixedArgs+1, len(args)))
			}
		} else if len(args) != nFixedArgs {
			throw(fmt.Errorf("arity mismatch: want %d arguments, got %d", nFixedArgs, len(args)))
		}
		// Slot 0 is the EvalCtx, then options, then arguments.
		convertedArgs := make([]reflect.Value, 1+nopts+len(args))
		convertedArgs[0] = reflect.ValueOf(ec)
		// Convert and fill options.
		var err error
		optValues := optSet.MustPick(opts)
		for i, v := range optValues {
			convertedArgs[1+i], err = convertArg(v, funcType.In(1+i))
			if err != nil {
				throw(errors.New("bad option " + parse.Quote(optSet.optSpecs[i].Name) + ": " + err.Error()))
			}
		}
		// Convert and fill fixed arguments.
		for i, arg := range args[:nFixedArgs] {
			convertedArgs[fixedArgsFrom+i], err = convertArg(arg, funcType.In(fixedArgsFrom+i))
			if err != nil {
				throw(errors.New("bad argument: " + err.Error()))
			}
		}
		if isVariadic {
			// Convert the remaining arguments to the variadic element type.
			for i, arg := range args[nFixedArgs:] {
				convertedArgs[fixedArgsTo+i], err = convertArg(arg, variadicType)
				if err != nil {
					throw(errors.New("bad argument: " + err.Error()))
				}
			}
		} else if hasOptionalIterate {
			var iterate func(func(Value))
			if len(args) == nFixedArgs {
				// No Iterator specified in arguments. Use input.
				// Since convertedArgs was created according to the size of the
				// actual argument list, we now append an empty element to make
				// room for this additional iterator argument.
				convertedArgs = append(convertedArgs, reflect.Value{})
				iterate = ec.IterateInputs
			} else {
				iterator, ok := args[nFixedArgs].(Iterator)
				if !ok {
					throw(errors.New("bad argument: need iterator, got " + args[nFixedArgs].Kind()))
				}
				// Adapt the Iterator (whose callback can stop iteration) to
				// the unconditional iterate signature.
				iterate = func(f func(Value)) {
					iterator.Iterate(func(v Value) bool {
						f(v)
						return true
					})
				}
			}
			convertedArgs[fixedArgsTo] = reflect.ValueOf(iterate)
		}
		reflect.ValueOf(inner).Call(convertedArgs)
	}
}
// supportedArgType reports whether t may appear as an argument type of a
// function wrapped by WrapFn: string, int, float64, or any type that
// implements the Value interface.
func supportedArgType(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.String, reflect.Int, reflect.Float64:
		return true
	}
	return t.Implements(valueType)
}
// convertArg converts a Value to a reflect.Value of wantType, for passing to
// a function wrapped by WrapFn. String kinds convert via ToString (the String
// value type is special-cased to keep its identity); int and float64 parse
// the value with toInt/toFloat. Any other type accepts arg as-is when its
// dynamic type is convertible to wantType.
//
// On a non-nil error the returned reflect.Value wraps nil and must not be
// used; callers throw in that case.
func convertArg(arg Value, wantType reflect.Type) (reflect.Value, error) {
	var converted interface{}
	var err error
	switch wantType.Kind() {
	case reflect.String:
		if wantType == stringValueType {
			converted = String(ToString(arg))
		} else {
			converted = ToString(arg)
		}
	case reflect.Int:
		converted, err = toInt(arg)
	case reflect.Float64:
		converted, err = toFloat(arg)
	default:
		// NOTE(review): ConvertibleTo is used as a proxy for assignability
		// and arg is stored unconverted — this assumes convertible implies
		// assignable for the Value types WrapFn permits. Confirm.
		if reflect.TypeOf(arg).ConvertibleTo(wantType) {
			converted = arg
		} else {
			err = fmt.Errorf("need %s", wantType.Name())
		}
	}
	return reflect.ValueOf(converted), err
}
// wrapStringToString adapts a string-to-string function into a builtin that
// takes exactly one string argument and outputs the transformed string.
func wrapStringToString(f func(string) string) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		input := mustGetOneString(args)
		result := f(input)
		ec.ports[1].Chan <- String(result)
	}
}
// wrapStringToStringError adapts a fallible string-to-string function into a
// builtin that takes exactly one string argument, throws on error and
// otherwise outputs the transformed string.
func wrapStringToStringError(f func(string) (string, error)) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		input := mustGetOneString(args)
		result, err := f(input)
		maybeThrow(err)
		ec.ports[1].Chan <- String(result)
	}
}
// wrapStrCompare builds a builtin that falsifies the context unless cmp holds
// between every adjacent pair of arguments. All arguments must be strings and
// there must be at least two of them; otherwise ErrArgs is thrown.
func wrapStrCompare(cmp func(a, b string) bool) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		if len(args) < 2 {
			throw(ErrArgs)
		}
		// Validate and unwrap all arguments before comparing any of them.
		strs := make([]string, len(args))
		for i, a := range args {
			s, ok := a.(String)
			if !ok {
				throw(ErrArgs)
			}
			strs[i] = string(s)
		}
		for i := 1; i < len(strs); i++ {
			if !cmp(strs[i-1], strs[i]) {
				ec.falsify()
				return
			}
		}
	}
}
// wrapNumCompare builds a builtin that falsifies the context unless cmp holds
// between every adjacent pair of arguments, parsed as float64. At least two
// arguments are required; parse failures are thrown.
func wrapNumCompare(cmp func(a, b float64) bool) func(*EvalCtx, []Value, map[string]Value) {
	return func(ec *EvalCtx, args []Value, opts map[string]Value) {
		TakeNoOpt(opts)
		if len(args) < 2 {
			throw(ErrArgs)
		}
		// Parse everything up front so a bad argument aborts before any
		// comparison happens.
		floats := make([]float64, len(args))
		for i, a := range args {
			f, err := toFloat(a)
			maybeThrow(err)
			floats[i] = f
		}
		for i := 1; i < len(floats); i++ {
			if !cmp(floats[i-1], floats[i]) {
				ec.falsify()
				return
			}
		}
	}
}
// errMustBeOneString is thrown when a builtin needs exactly one string
// argument but got something else.
var errMustBeOneString = errors.New("must be one string argument")

// mustGetOneString returns the sole argument as a Go string, throwing
// errMustBeOneString unless args holds exactly one String.
func mustGetOneString(args []Value) string {
	if len(args) != 1 {
		throw(errMustBeOneString)
	}
	str, ok := args[0].(String)
	if !ok {
		throw(errMustBeOneString)
	}
	return string(str)
}
// nop accepts any arguments and options and does nothing.
func nop(ec *EvalCtx, args []Value, opts map[string]Value) {
}
// falseFn does nothing except falsify the evaluation context.
func falseFn(ec *EvalCtx, args []Value, opts map[string]Value) {
	ec.falsify()
}
// put writes each argument, in order, to the output channel.
func put(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	ch := ec.ports[1].Chan
	for _, v := range args {
		ch <- v
	}
}
// kindOf outputs the kind (type name) of each argument as a string.
func kindOf(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	ch := ec.ports[1].Chan
	for _, v := range args {
		ch <- String(v.Kind())
	}
}
// fail throws an error whose message is the string form of arg.
func fail(ec *EvalCtx, arg Value) {
	throw(errors.New(ToString(arg)))
}
// multiErrorFn throws a MultiError wrapping all given errors.
func multiErrorFn(ec *EvalCtx, args ...Error) {
	throw(MultiError{args})
}
// returnFn throws the Return control-flow exception.
func returnFn(ec *EvalCtx) {
	throw(Return)
}

// breakFn throws the Break control-flow exception.
func breakFn(ec *EvalCtx) {
	throw(Break)
}

// continueFn throws the Continue control-flow exception.
func continueFn(ec *EvalCtx) {
	throw(Continue)
}
// print writes the arguments to the output file, separated by sepv, with no
// trailing newline.
func print(ec *EvalCtx, sepv String, args ...string) {
	out := ec.ports[1].File
	first := true
	for _, arg := range args {
		if !first {
			out.WriteString(string(sepv))
		}
		first = false
		out.WriteString(arg)
	}
}
// echo writes the arguments separated by sep, followed by a newline.
func echo(ec *EvalCtx, sep String, args ...string) {
	print(ec, sep, args...)
	ec.ports[1].File.WriteString("\n")
}
// pprint writes the representation of each argument on its own line.
func pprint(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	w := ec.ports[1].File
	for _, v := range args {
		w.WriteString(v.Repr(0))
		w.WriteString("\n")
	}
}
// slurp reads the entire byte input and outputs it as a single string.
// A read error is thrown.
//
// Fix: removed leftover debugging output (printing the stdin nonblock state
// to stdout on error), which polluted the user's output stream.
func slurp(ec *EvalCtx) {
	in := ec.ports[0].File
	out := ec.ports[1].Chan
	all, err := ioutil.ReadAll(in)
	maybeThrow(err)
	out <- String(string(all))
}
// intoLines writes the string form of each iterated value to the output
// file, one per line.
func intoLines(ec *EvalCtx, iterate func(func(Value))) {
	w := ec.ports[1].File
	iterate(func(v Value) {
		fmt.Fprintln(w, ToString(v))
	})
}
// unpack puts each element of the argument onto the output channel.
func unpack(ec *EvalCtx, v IteratorValue) {
	out := ec.ports[1].Chan
	// The callback always returns true, so iteration never stops early.
	v.Iterate(func(e Value) bool {
		out <- e
		return true
	})
}
// joins joins all input strings with a delimiter and outputs the result as a
// single string. Non-string input is thrown. Note that the separator is only
// emitted once the accumulated output is non-empty, so leading empty strings
// do not produce separators (preserved behavior).
func joins(ec *EvalCtx, sep String, iterate func(func(Value))) {
	var b bytes.Buffer
	iterate(func(v Value) {
		s, ok := v.(String)
		if !ok {
			throwf("join wants string input, got %s", v.Kind())
		}
		if b.Len() > 0 {
			b.WriteString(string(sep))
		}
		b.WriteString(string(s))
	})
	ec.ports[1].Chan <- String(b.String())
}
// splits splits the argument string by the delimiter and outputs every piece.
func splits(ec *EvalCtx, sep, s String) {
	out := ec.ports[1].Chan
	for _, piece := range strings.Split(string(s), string(sep)) {
		out <- String(piece)
	}
}
// hasPrefix falsifies the context unless s starts with prefix.
func hasPrefix(ec *EvalCtx, s, prefix String) {
	if strings.HasPrefix(string(s), string(prefix)) {
		return
	}
	ec.falsify()
}
// hasSuffix falsifies the context unless s ends with suffix.
func hasSuffix(ec *EvalCtx, s, suffix String) {
	if strings.HasSuffix(string(s), string(suffix)) {
		return
	}
	ec.falsify()
}
// toJSON converts a stream of Value's to JSON data written to the output
// file, one JSON document per value. Encoding errors are thrown.
func toJSON(ec *EvalCtx, iterate func(func(Value))) {
	enc := json.NewEncoder(ec.ports[1].File)
	iterate(func(v Value) {
		maybeThrow(enc.Encode(v))
	})
}
// fromJSON parses a stream of JSON data from the input file into Value's on
// the output channel. Returns at EOF; any other decode error is thrown.
func fromJSON(ec *EvalCtx) {
	dec := json.NewDecoder(ec.ports[0].File)
	out := ec.ports[1].Chan
	for {
		var v interface{}
		err := dec.Decode(&v)
		if err == io.EOF {
			return
		}
		if err != nil {
			throw(err)
		}
		out <- FromJSONInterface(v)
	}
}
// each takes a single closure and applies it to all input values. A Break
// exception from the closure causes the remaining values to be skipped
// (iteration itself cannot be aborted); Continue and nil keep going; any
// other exception is rethrown.
func each(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	broken := false
	iterate(func(v Value) {
		if broken {
			return
		}
		// NOTE We don't have the position range of the closure in the source.
		// Ideally, it should be kept in the Closure itself.
		newec := ec.fork("closure of each")
		newec.ports[0] = NullClosedInput
		ex := newec.PCall(f, []Value{v}, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop
		case Break:
			broken = true
		default:
			throw(ex)
		}
	})
}
// peach takes a single closure and applies it to all input values in
// parallel, one goroutine per value. A Break from the closure stops new
// goroutines from being launched; the first non-Break exception is rethrown
// after all launched goroutines finish.
//
// Fix: broken and err were read in the iterate callback and written by the
// worker goroutines without any synchronization — a data race. They are now
// guarded by a mutex.
func peach(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	var w sync.WaitGroup
	var mu sync.Mutex // guards broken and err
	broken := false
	var err error
	iterate(func(v Value) {
		mu.Lock()
		stop := broken || err != nil
		mu.Unlock()
		if stop {
			return
		}
		w.Add(1)
		go func() {
			defer w.Done()
			// NOTE We don't have the position range of the closure in the source.
			// Ideally, it should be kept in the Closure itself.
			newec := ec.fork("closure of each")
			newec.ports[0] = NullClosedInput
			ex := newec.PCall(f, []Value{v}, NoOpts)
			ClosePorts(newec.ports)
			mu.Lock()
			defer mu.Unlock()
			switch ex {
			case nil, Continue:
				// nop
			case Break:
				broken = true
			default:
				err = ex
			}
		}()
	})
	w.Wait()
	maybeThrow(err)
}
// eawkWordSep matches runs of spaces and tabs, used to split a line into words.
var eawkWordSep = regexp.MustCompile("[ \t]+")

// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue, iterate func(func(Value))) {
	broken := false
	iterate(func(v Value) {
		// After Break, skip remaining input (iteration cannot stop early).
		if broken {
			return
		}
		line, ok := v.(String)
		if !ok {
			throw(ErrInput)
		}
		// First argument is the whole line, followed by its fields.
		args := []Value{line}
		for _, field := range eawkWordSep.Split(strings.Trim(string(line), " \t"), -1) {
			args = append(args, String(field))
		}
		newec := ec.fork("fn of eawk")
		// TODO: Close port 0 of newec.
		ex := newec.PCall(f, args, NoOpts)
		ClosePorts(newec.ports)
		switch ex {
		case nil, Continue:
			// nop
		case Break:
			broken = true
		default:
			throw(ex)
		}
	})
}
// constantly outputs a function that, when called with no arguments, outputs
// the values originally given to constantly (captured by the closure).
func constantly(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	out := ec.ports[1].Chan
	// XXX Repr of this fn is not right
	out <- &BuiltinFn{
		"created by constantly",
		func(ec *EvalCtx, a []Value, o map[string]Value) {
			TakeNoOpt(o)
			if len(a) != 0 {
				throw(ErrArgs)
			}
			// Write to the caller's output port, not the port captured at
			// creation time (the inner out shadows the outer one).
			out := ec.ports[1].Chan
			for _, v := range args {
				out <- v
			}
		},
	}
}
// cd changes the working directory: to the home directory with no arguments,
// or to the single named directory. More than one argument throws ErrArgs.
func cd(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var dir string
	switch len(args) {
	case 0:
		dir = mustGetHome("")
	case 1:
		dir = ToString(args[0])
	default:
		throw(ErrArgs)
	}
	cdInner(dir, ec)
}
// cdInner changes the working directory to dir, throwing on failure, and
// asynchronously records the new directory in the store when one is
// connected.
//
// Fix: store.Waits.Add(1) was called inside the spawned goroutine, so a
// concurrent Waits.Wait could return before the Add executed and miss this
// pending store write. Add now happens before the goroutine starts.
func cdInner(dir string, ec *EvalCtx) {
	err := os.Chdir(dir)
	if err != nil {
		throw(err)
	}
	if ec.Store != nil {
		// XXX Error ignored.
		pwd, err := os.Getwd()
		if err == nil {
			store := ec.Store
			store.Waits.Add(1)
			go func() {
				// XXX Error ignored.
				store.AddDir(pwd, 1)
				store.Waits.Done()
				Logger.Println("added dir to store:", pwd)
			}()
		}
	}
}
// dirFieldNames are the field names of the structs output by dirs.
var dirFieldNames = []string{"path", "score"}

// dirs outputs the stored directory history, one struct with "path" and
// "score" fields per directory. Throws if the store is not connected or
// cannot be listed.
func dirs(ec *EvalCtx) {
	if ec.Store == nil {
		throw(ErrStoreNotConnected)
	}
	dirs, err := ec.Store.ListDirs()
	if err != nil {
		throw(errors.New("store error: " + err.Error()))
	}
	out := ec.ports[1].Chan
	for _, dir := range dirs {
		out <- &Struct{dirFieldNames, []Variable{
			NewRoVariable(String(dir.Path)),
			NewRoVariable(String(fmt.Sprint(dir.Score))),
		}}
	}
}
// history outputs every stored command, in sequence order. Throws if the
// store is not connected or a store operation fails.
func history(ec *EvalCtx) {
	store := ec.Store
	if store == nil {
		throw(ErrStoreNotConnected)
	}
	seq, err := store.NextCmdSeq()
	maybeThrow(err)
	cmds, err := store.Cmds(0, seq)
	maybeThrow(err)
	out := ec.ports[1].Chan
	for _, c := range cmds {
		out <- String(c)
	}
}
// pathAbs outputs the absolute form of fname, throwing on failure.
func pathAbs(ec *EvalCtx, fname string) {
	abs, err := filepath.Abs(fname)
	maybeThrow(err)
	ec.ports[1].Chan <- String(abs)
}
// source evaluates the named script file in the current context by
// delegating to ec.Source.
func source(ec *EvalCtx, fname string) {
	ec.Source(fname)
}
// toFloat converts a Value to a float64. The value must be a String holding
// a valid floating-point literal; otherwise an error is returned.
//
// Fix: the original reassigned the interface parameter and then repeated the
// type assertion (arg.(String)) a second time; the assertion is now done
// once into a concrete String.
func toFloat(arg Value) (float64, error) {
	s, ok := arg.(String)
	if !ok {
		return 0, fmt.Errorf("must be string")
	}
	return strconv.ParseFloat(string(s), 64)
}
// toInt converts a Value to an int. The value must be a String holding a
// valid decimal integer; otherwise an error is returned.
//
// Fix: same cleanup as toFloat — the redundant second type assertion is
// removed by asserting once into a concrete String.
func toInt(arg Value) (int, error) {
	s, ok := arg.(String)
	if !ok {
		return 0, fmt.Errorf("must be string")
	}
	return strconv.Atoi(string(s))
}
// plus outputs the sum of its arguments (0 with no arguments).
func plus(ec *EvalCtx, nums ...float64) {
	var sum float64
	for _, n := range nums {
		sum += n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", sum))
}
// minus outputs sum with every following number subtracted. With no extra
// numbers it acts as unary negation of sum.
func minus(ec *EvalCtx, sum float64, nums ...float64) {
	if len(nums) == 0 {
		// Unary -
		sum = -sum
	}
	for _, n := range nums {
		sum -= n
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", sum))
}
// times outputs the product of its arguments (1 with no arguments).
func times(ec *EvalCtx, nums ...float64) {
	product := 1.0
	for _, factor := range nums {
		product *= factor
	}
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", product))
}
// slash implements the / builtin. With no arguments it changes to the root
// directory (like `cd /`); with arguments it performs division by
// delegating to wrappedDivide.
func slash(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	if len(args) == 0 {
		// cd /
		cdInner("/", ec)
		return
	}
	// Division
	wrappedDivide(ec, args, opts)
}
// wrappedDivide adapts divide for calling with a raw argument list; slash
// delegates to it when / is given arguments.
var wrappedDivide = WrapFn(divide)

// divide outputs prod divided by each of nums in turn. Division follows
// float64 semantics, so dividing by zero yields ±Inf or NaN rather than
// an error.
func divide(ec *EvalCtx, prod float64, nums ...float64) {
	out := ec.ports[1].Chan
	for _, f := range nums {
		prod /= f
	}
	out <- String(fmt.Sprintf("%g", prod))
}
// pow outputs b raised to the power p.
func pow(ec *EvalCtx, b, p float64) {
	result := math.Pow(b, p)
	ec.ports[1].Chan <- String(fmt.Sprintf("%g", result))
}
// mod outputs a modulo b (Go % semantics: the result takes the sign of a).
//
// Fix: a zero divisor previously crashed with a runtime "integer divide by
// zero" panic; it is now reported as a normal argument error, consistent
// with how sibling builtins (e.g. randint) signal bad arguments.
func mod(ec *EvalCtx, a, b int) {
	if b == 0 {
		throw(ErrArgs)
	}
	ec.ports[1].Chan <- String(strconv.Itoa(a % b))
}
// randFn outputs a pseudo-random float64 in [0, 1).
func randFn(ec *EvalCtx) {
	ec.ports[1].Chan <- String(fmt.Sprint(rand.Float64()))
}
// randint outputs a pseudo-random integer in [low, high). Throws ErrArgs
// unless low < high.
func randint(ec *EvalCtx, low, high int) {
	if low >= high {
		throw(ErrArgs)
	}
	n := low + rand.Intn(high-low)
	ec.ports[1].Chan <- String(strconv.Itoa(n))
}
// ord outputs the code point of each rune in s, in hexadecimal (0x-prefixed).
func ord(ec *EvalCtx, str string) {
	out := ec.ports[1].Chan
	for _, r := range str {
		hex := fmt.Sprintf("0x%x", r)
		out <- String(hex)
	}
}
// ErrBadBase is thrown by base when the radix is outside [2, 36].
var ErrBadBase = errors.New("bad base")

// base outputs each number formatted in radix b. Throws ErrBadBase unless
// 2 <= b <= 36.
func base(ec *EvalCtx, b int, nums ...int) {
	if b < 2 || b > 36 {
		throw(ErrBadBase)
	}
	out := ec.ports[1].Chan
	for _, n := range nums {
		out <- String(strconv.FormatInt(int64(n), b))
	}
}
// rangeFn outputs integers from lower (default 0) up to but excluding upper,
// incrementing by step (default 1). Accepts 1 (upper), 2 (lower, upper) or
// 3 (lower, upper, step) arguments; anything else throws ErrArgs.
//
// Fix: a zero or negative step made the loop spin forever (i < upper never
// becomes false); it is now rejected with ErrArgs.
func rangeFn(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var lower, upper int
	step := 1
	var err error
	switch len(args) {
	case 1:
		upper, err = toInt(args[0])
		maybeThrow(err)
	case 2, 3:
		lower, err = toInt(args[0])
		maybeThrow(err)
		upper, err = toInt(args[1])
		maybeThrow(err)
		if len(args) == 3 {
			step, err = toInt(args[2])
			maybeThrow(err)
		}
	default:
		throw(ErrArgs)
	}
	if step <= 0 {
		// Would never terminate.
		throw(ErrArgs)
	}
	out := ec.ports[1].Chan
	for i := lower; i < upper; i += step {
		out <- String(strconv.Itoa(i))
	}
}
// boolFn outputs the boolean value of v, as determined by ToBool.
func boolFn(ec *EvalCtx, v Value) {
	out := ec.ports[1].Chan
	out <- Bool(ToBool(v))
}
// is falsifies the context unless all arguments compare equal with Go's ==
// (identity-style equality on the interface values). At least two arguments
// are required.
func is(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	if len(args) < 2 {
		throw(ErrArgs)
	}
	for i := 1; i < len(args); i++ {
		if args[i-1] != args[i] {
			ec.falsify()
			return
		}
	}
}
// eq falsifies the context unless all arguments are deeply equal (DeepEq).
// At least two arguments are required.
func eq(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	if len(args) < 2 {
		throw(ErrArgs)
	}
	for i := 1; i < len(args); i++ {
		if !DeepEq(args[i-1], args[i]) {
			ec.falsify()
			return
		}
	}
}
// resolveFn resolves the named command in the current context (via resolve)
// and outputs the result.
func resolveFn(ec *EvalCtx, cmd String) {
	out := ec.ports[1].Chan
	out <- resolve(string(cmd), ec)
}
// take outputs at most the first n iterated values. The iteration itself
// cannot be stopped early, so the remaining values are consumed and dropped.
func take(ec *EvalCtx, n int, iterate func(func(Value))) {
	out := ec.ports[1].Chan
	seen := 0
	iterate(func(v Value) {
		if seen < n {
			out <- v
		}
		seen++
	})
}
// count outputs a count: with no arguments, the number of input values; with
// one argument, the length of that value (via Lener when implemented,
// otherwise by iterating it). Any other arity, or an uncountable argument,
// is thrown.
func count(ec *EvalCtx, args []Value, opts map[string]Value) {
	TakeNoOpt(opts)
	var n int
	switch len(args) {
	case 0:
		// Count inputs.
		ec.IterateInputs(func(Value) {
			n++
		})
	case 1:
		// Get length of argument.
		v := args[0]
		if lener, ok := v.(Lener); ok {
			n = lener.Len()
		} else if iterator, ok := v.(Iterator); ok {
			iterator.Iterate(func(Value) bool {
				n++
				return true
			})
		} else {
			throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
		}
	default:
		throw(errors.New("want 0 or 1 argument"))
	}
	ec.ports[1].Chan <- String(strconv.Itoa(n))
}
// wcswidth outputs the terminal display width of s.
func wcswidth(ec *EvalCtx, s String) {
	width := util.Wcswidth(string(s))
	ec.ports[1].Chan <- String(strconv.Itoa(width))
}
// fg resumes the given stopped processes and waits for them. All pids must
// belong to the same process group; that group is made the foreground job of
// the terminal (fd 0) before the processes are continued with SIGCONT. The
// collected per-process results are thrown as a composite error.
func fg(ec *EvalCtx, pids ...int) {
	if len(pids) == 0 {
		throw(ErrArgs)
	}
	// Verify all pids share one process group.
	var thepgid int
	for i, pid := range pids {
		pgid, err := syscall.Getpgid(pid)
		maybeThrow(err)
		if i == 0 {
			thepgid = pgid
		} else if pgid != thepgid {
			throw(ErrNotInSameGroup)
		}
	}
	// Hand the terminal to the process group.
	err := sys.Tcsetpgrp(0, thepgid)
	maybeThrow(err)
	// Resume every process; record per-pid errors instead of aborting so
	// the remaining processes are still continued.
	errors := make([]Error, len(pids))
	for i, pid := range pids {
		err := syscall.Kill(pid, syscall.SIGCONT)
		if err != nil {
			errors[i] = Error{err}
		}
	}
	// Wait for each successfully resumed process and record its exit status.
	for i, pid := range pids {
		if errors[i] != OK {
			continue
		}
		var ws syscall.WaitStatus
		_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
		if err != nil {
			errors[i] = Error{err}
		} else {
			// TODO find command name
			errors[i] = Error{NewExternalCmdExit(fmt.Sprintf("(pid %d)", pid), ws, pid)}
		}
	}
	throwCompositeError(errors)
}
// tildeAbbr outputs path with the home-directory prefix abbreviated to ~.
func tildeAbbr(ec *EvalCtx, path string) {
	abbreviated := util.TildeAbbr(path)
	ec.ports[1].Chan <- String(abbreviated)
}
// fopen opens the named file for reading and outputs it as a File value.
// Open errors are thrown. The caller is responsible for closing the file
// with fclose.
func fopen(ec *EvalCtx, name string) {
	// TODO support opening files for writing etc as well.
	out := ec.ports[1].Chan
	f, err := os.Open(name)
	maybeThrow(err)
	out <- File{f}
}
// pipe creates an OS pipe and outputs it as a Pipe value holding the read
// and write ends. Creation errors are thrown.
func pipe(ec *EvalCtx) {
	r, w, err := os.Pipe()
	maybeThrow(err)
	ec.ports[1].Chan <- Pipe{r, w}
}
// fclose closes a file opened with fopen, throwing any close error.
func fclose(ec *EvalCtx, f File) { maybeThrow(f.inner.Close()) }

// prclose closes the read end of a pipe, throwing any close error.
func prclose(ec *EvalCtx, p Pipe) { maybeThrow(p.r.Close()) }

// pwclose closes the write end of a pipe, throwing any close error.
func pwclose(ec *EvalCtx, p Pipe) { maybeThrow(p.w.Close()) }
// sleep pauses for t seconds, returning early and throwing ErrInterrupted
// if the evaluation is interrupted first.
func sleep(ec *EvalCtx, t float64) {
	d := time.Duration(float64(time.Second) * t)
	select {
	case <-ec.Interrupts():
		throw(ErrInterrupted)
	case <-time.After(d):
	}
}
// _stack dumps the stack traces of all goroutines to the output file.
//
// Fix: the original wrote the entire buffer (out.Write(buf)) even though
// runtime.Stack reports how many bytes it actually used, so trailing NUL
// padding was written whenever the dump was smaller than the buffer. Only
// buf[:n] is written now.
func _stack(ec *EvalCtx) {
	out := ec.ports[1].File
	// XXX dup with main.go
	// Double the buffer until runtime.Stack no longer fills it completely.
	buf := make([]byte, 1024)
	n := runtime.Stack(buf, true)
	for n == len(buf) {
		buf = make([]byte, 2*len(buf))
		n = runtime.Stack(buf, true)
	}
	out.Write(buf[:n])
}
// _log redirects the debug log to the named file, throwing on failure.
func _log(ec *EvalCtx, fname string) {
	maybeThrow(util.SetOutputFile(fname))
}
// exec replaces the current process with an external command. With no
// arguments it re-executes "elvish" itself. The command is resolved via
// ec.Search; cleanup (preExit) runs before the syscall, and failures are
// thrown.
func exec(ec *EvalCtx, args ...string) {
	if len(args) == 0 {
		args = []string{"elvish"}
	}
	var err error
	args[0], err = ec.Search(args[0])
	maybeThrow(err)
	preExit(ec)
	// On success this call does not return.
	err = syscall.Exec(args[0], args, os.Environ())
	maybeThrow(err)
}
// exit terminates the process after cleanup, with status 0 or the single
// given status. More than one argument throws ErrArgs.
func exit(ec *EvalCtx, args ...int) {
	code := 0
	switch len(args) {
	case 0:
		// default status 0
	case 1:
		code = args[0]
	default:
		throw(ErrArgs)
	}
	preExit(ec)
	os.Exit(code)
}
// preExit runs cleanup before the process exits: closing the persistent
// store and terminating the stub process, when each is present.
//
// Fix: ec.Store was dereferenced unconditionally, while every other user of
// the store (dirs, history, cdInner) guards against a nil store; a nil-check
// is added for consistency and to avoid a nil-pointer panic on exit.
func preExit(ec *EvalCtx) {
	if ec.Store != nil {
		if err := ec.Store.Close(); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
	if ec.Stub != nil {
		ec.Stub.Terminate()
	}
}
|
package guest
// SecurityGroups is the text/template fragment (defined as
// "security_groups") rendering the CloudFormation security-group resources
// of a guest cluster: master, worker, ingress and etcd-ELB security groups,
// plus the SecurityGroupIngress rules wiring them together for calico
// traffic and etcd ELB access. When API whitelisting is enabled, the NAT
// gateway IPs are additionally allowed on port 443 of the master group.
const SecurityGroups = `{{define "security_groups" }}
{{- $v := .Guest.SecurityGroups }}
MasterSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.MasterSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.MasterSecurityGroupRules }}
-
Description: {{ .Description }}
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
CidrIp: {{ .SourceCIDR }}
{{ end }}
{{- if $v.APIWhitelistEnabled }}
{{- $g := .Guest.NATGateway }}
{{- range $g.Gateways }}
-
Description: Allow NAT gateway IP
IpProtocol: tcp
FromPort: 443
ToPort: 443
CidrIp: !Join [ "/", [ !Ref {{ .NATEIPName }}, "32" ] ]
{{- end}}
{{- end }}
Tags:
- Key: Name
Value: {{ $v.MasterSecurityGroupName }}
WorkerSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.WorkerSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.WorkerSecurityGroupRules }}
-
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
{{ if .SourceCIDR }}
CidrIp: {{ .SourceCIDR }}
{{ else }}
SourceSecurityGroupId: !Ref {{ .SourceSecurityGroup }}
{{ end }}
{{ end }}
Tags:
- Key: Name
Value: {{ $v.WorkerSecurityGroupName }}
IngressSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.IngressSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.IngressSecurityGroupRules }}
-
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
CidrIp: {{ .SourceCIDR }}
{{ end }}
Tags:
- Key: Name
Value: {{ $v.IngressSecurityGroupName }}
EtcdELBSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.EtcdELBSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.EtcdELBSecurityGroupRules }}
-
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
CidrIp: {{ .SourceCIDR }}
{{ end }}
Tags:
- Key: Name
Value: {{ $v.EtcdELBSecurityGroupName }}
# Allow all access between masters and workers for calico. This is done after
# the other rules to avoid circular dependencies.
MasterAllowCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: MasterSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref MasterSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref MasterSecurityGroup
MasterAllowWorkerCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: MasterSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref MasterSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref WorkerSecurityGroup
MasterAllowEtcdIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: MasterSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref MasterSecurityGroup
IpProtocol: "tcp"
FromPort: 2379
ToPort: 2379
SourceSecurityGroupId: !Ref EtcdELBSecurityGroup
WorkerAllowCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: WorkerSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref WorkerSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref WorkerSecurityGroup
WorkerAllowMasterCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: WorkerSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref WorkerSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref MasterSecurityGroup
{{ end }}`
Lock default security group (#1357)
package guest
// SecurityGroups is the text/template fragment (defined as
// "security_groups") rendering the CloudFormation security-group resources
// of a guest cluster: master, worker, ingress and etcd-ELB security groups,
// the SecurityGroupIngress rules wiring them together for calico traffic and
// etcd ELB access, and an egress rule that locks the VPC default security
// group down to loopback-only outbound traffic. When API whitelisting is
// enabled, the NAT gateway IPs are additionally allowed on port 443 of the
// master group.
const SecurityGroups = `{{define "security_groups" }}
{{- $v := .Guest.SecurityGroups }}
MasterSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.MasterSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.MasterSecurityGroupRules }}
-
Description: {{ .Description }}
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
CidrIp: {{ .SourceCIDR }}
{{ end }}
{{- if $v.APIWhitelistEnabled }}
{{- $g := .Guest.NATGateway }}
{{- range $g.Gateways }}
-
Description: Allow NAT gateway IP
IpProtocol: tcp
FromPort: 443
ToPort: 443
CidrIp: !Join [ "/", [ !Ref {{ .NATEIPName }}, "32" ] ]
{{- end}}
{{- end }}
Tags:
- Key: Name
Value: {{ $v.MasterSecurityGroupName }}
WorkerSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.WorkerSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.WorkerSecurityGroupRules }}
-
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
{{ if .SourceCIDR }}
CidrIp: {{ .SourceCIDR }}
{{ else }}
SourceSecurityGroupId: !Ref {{ .SourceSecurityGroup }}
{{ end }}
{{ end }}
Tags:
- Key: Name
Value: {{ $v.WorkerSecurityGroupName }}
IngressSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.IngressSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.IngressSecurityGroupRules }}
-
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
CidrIp: {{ .SourceCIDR }}
{{ end }}
Tags:
- Key: Name
Value: {{ $v.IngressSecurityGroupName }}
EtcdELBSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: {{ $v.EtcdELBSecurityGroupName }}
VpcId: !Ref VPC
SecurityGroupIngress:
{{ range $v.EtcdELBSecurityGroupRules }}
-
IpProtocol: {{ .Protocol }}
FromPort: {{ .Port }}
ToPort: {{ .Port }}
CidrIp: {{ .SourceCIDR }}
{{ end }}
Tags:
- Key: Name
Value: {{ $v.EtcdELBSecurityGroupName }}
# Allow all access between masters and workers for calico. This is done after
# the other rules to avoid circular dependencies.
MasterAllowCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: MasterSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref MasterSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref MasterSecurityGroup
MasterAllowWorkerCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: MasterSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref MasterSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref WorkerSecurityGroup
MasterAllowEtcdIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: MasterSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref MasterSecurityGroup
IpProtocol: "tcp"
FromPort: 2379
ToPort: 2379
SourceSecurityGroupId: !Ref EtcdELBSecurityGroup
WorkerAllowCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: WorkerSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref WorkerSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref WorkerSecurityGroup
WorkerAllowMasterCalicoIngressRule:
Type: AWS::EC2::SecurityGroupIngress
DependsOn: WorkerSecurityGroup
Properties:
# Allow access between masters and workers for calico.
GroupId: !Ref WorkerSecurityGroup
IpProtocol: -1
FromPort: -1
ToPort: -1
SourceSecurityGroupId: !Ref MasterSecurityGroup
VPCDefaultSecurityGroupEgress:
Type: AWS::EC2::SecurityGroupEgress
Properties:
GroupId: !GetAtt VPC.DefaultSecurityGroup
Description: "Allow outbound traffic from loopback address."
IpProtocol: -1
CidrIp: 127.0.0.1/32
{{ end }}`
|
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcwire_test
import (
"bytes"
"github.com/conformal/btcwire"
"github.com/davecgh/go-spew/spew"
"io"
"reflect"
"testing"
"time"
)
// TestBlock tests the MsgBlock API.
//
// Fixes: removed the useless trailing `return` at the end of the test
// function, and the misleading max-payload comment ("Num addresses (varInt)
// + max allowed addresses.") that was copy-pasted from an address-message
// test and does not describe the block payload limit.
func TestBlock(t *testing.T) {
	pver := btcwire.ProtocolVersion

	// Block 1 header.
	prevHash := &blockOne.Header.PrevBlock
	merkleHash := &blockOne.Header.MerkleRoot
	bits := blockOne.Header.Bits
	nonce := blockOne.Header.Nonce
	bh := btcwire.NewBlockHeader(prevHash, merkleHash, bits, nonce)

	// Ensure the command is expected value.
	wantCmd := "block"
	msg := btcwire.NewMsgBlock(bh)
	if cmd := msg.Command(); cmd != wantCmd {
		t.Errorf("NewMsgBlock: wrong command - got %v want %v",
			cmd, wantCmd)
	}

	// Ensure max payload is expected value for latest protocol version.
	wantPayload := uint32(1000000)
	maxPayload := msg.MaxPayloadLength(pver)
	if maxPayload != wantPayload {
		t.Errorf("MaxPayloadLength: wrong max payload length for "+
			"protocol version %d - got %v, want %v", pver,
			maxPayload, wantPayload)
	}

	// Ensure we get the same block header data back out.
	if !reflect.DeepEqual(&msg.Header, bh) {
		t.Errorf("NewMsgBlock: wrong block header - got %v, want %v",
			spew.Sdump(&msg.Header), spew.Sdump(bh))
	}

	// Ensure transactions are added properly.
	tx := blockOne.Transactions[0].Copy()
	msg.AddTransaction(tx)
	if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
		t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
			spew.Sdump(msg.Transactions),
			spew.Sdump(blockOne.Transactions))
	}

	// Ensure transactions are properly cleared.
	msg.ClearTransactions()
	if len(msg.Transactions) != 0 {
		t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
			len(msg.Transactions), 0)
	}
}
// TestBlockTxShas tests the ability to generate a slice of all transaction
// hashes from a block accurately.
//
// Fix: on a TxShas error the test previously fell through and compared a nil
// slice against the expected hashes, producing a confusing secondary
// failure; it now returns immediately, matching the handling of the
// NewShaHashFromStr error above it.
func TestBlockTxShas(t *testing.T) {
	// Block 1, transaction 1 hash.
	hashStr := "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
	wantHash, err := btcwire.NewShaHashFromStr(hashStr)
	if err != nil {
		t.Errorf("NewShaHashFromStr: %v", err)
		return
	}

	wantShas := []btcwire.ShaHash{*wantHash}
	shas, err := blockOne.TxShas()
	if err != nil {
		t.Errorf("TxShas: %v", err)
		return
	}
	if !reflect.DeepEqual(shas, wantShas) {
		t.Errorf("TxShas: wrong transaction hashes - got %v, want %v",
			spew.Sdump(shas), spew.Sdump(wantShas))
	}
}
// TestBlockSha tests the ability to generate the hash of a block accurately.
//
// Fix: on a NewShaHashFromStr error the test previously continued with a nil
// wantHash, which would then be passed to IsEqual; it now returns
// immediately after reporting the error.
func TestBlockSha(t *testing.T) {
	// Block 1 hash.
	hashStr := "839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048"
	wantHash, err := btcwire.NewShaHashFromStr(hashStr)
	if err != nil {
		t.Errorf("NewShaHashFromStr: %v", err)
		return
	}

	// Ensure the hash produced is expected.
	blockHash, err := blockOne.BlockSha()
	if err != nil {
		t.Errorf("BlockSha: %v", err)
		return
	}
	if !blockHash.IsEqual(wantHash) {
		t.Errorf("BlockSha: wrong hash - got %v, want %v",
			spew.Sprint(blockHash), spew.Sprint(wantHash))
	}
}
// TestBlockWire tests the MsgBlock wire encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockWire(t *testing.T) {
	// All cases reuse block one; only the protocol version varies, and the
	// expected wire encoding is identical across these versions.
	tests := []struct {
		in     *btcwire.MsgBlock // Message to encode
		out    *btcwire.MsgBlock // Expected decoded message
		buf    []byte            // Wire encoding
		txLocs []btcwire.TxLoc   // Expected transaction locations
		pver   uint32            // Protocol version for wire encoding
	}{
		// Latest protocol version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.ProtocolVersion,
		},

		// Protocol version BIP0035Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.BIP0035Version,
		},

		// Protocol version BIP0031Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.BIP0031Version,
		},

		// Protocol version NetAddressTimeVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.NetAddressTimeVersion,
		},

		// Protocol version MultipleAddressVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.MultipleAddressVersion,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode the message to wire format.
		var buf bytes.Buffer
		err := test.in.BtcEncode(&buf, test.pver)
		if err != nil {
			t.Errorf("BtcEncode #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Decode the message from wire format.
		var msg btcwire.MsgBlock
		rbuf := bytes.NewBuffer(test.buf)
		err = msg.BtcDecode(rbuf, test.pver)
		if err != nil {
			t.Errorf("BtcDecode #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&msg, test.out) {
			t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
				spew.Sdump(&msg), spew.Sdump(test.out))
			continue
		}
	}
}
// TestBlockWireErrors performs negative tests against wire encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockWireErrors(t *testing.T) {
	// Use protocol version 60002 specifically here instead of the latest
	// because the test data is using bytes encoded with that protocol
	// version.
	pver := uint32(60002)

	// Each case truncates the fixed-size reader/writer at the byte offset
	// (max) of a particular field in the encoding to force an error there.
	tests := []struct {
		in       *btcwire.MsgBlock // Value to encode
		buf      []byte            // Wire encoding
		pver     uint32            // Protocol version for wire encoding
		max      int               // Max size of fixed buffer to induce errors
		writeErr error             // Expected write error
		readErr  error             // Expected read error
	}{
		// Force error in version.
		{&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, pver, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, pver, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, pver, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, pver, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, pver, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, pver, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode to wire format.
		w := newFixedWriter(test.max)
		err := test.in.BtcEncode(w, test.pver)
		if err != test.writeErr {
			t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Decode from wire format.
		var msg btcwire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = msg.BtcDecode(r, test.pver)
		if err != test.readErr {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}
	}
}
// TestBlockSerialize tests MsgBlock serialize and deserialize.
func TestBlockSerialize(t *testing.T) {
	tests := []struct {
		in     *btcwire.MsgBlock // Message to encode
		out    *btcwire.MsgBlock // Expected decoded message
		buf    []byte            // Serialized data
		txLocs []btcwire.TxLoc   // Expected transaction locations
	}{
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block.
		var buf bytes.Buffer
		err := test.in.Serialize(&buf)
		if err != nil {
			t.Errorf("Serialize #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("Serialize #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Deserialize the block.
		var block btcwire.MsgBlock
		rbuf := bytes.NewBuffer(test.buf)
		err = block.Deserialize(rbuf)
		if err != nil {
			t.Errorf("Deserialize #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&block, test.out) {
			t.Errorf("Deserialize #%d\n got: %s want: %s", i,
				spew.Sdump(&block), spew.Sdump(test.out))
			continue
		}

		// Deserialize the block while gathering transaction location
		// information.
		var txLocBlock btcwire.MsgBlock
		rbuf = bytes.NewBuffer(test.buf)
		txLocs, err := txLocBlock.DeserializeTxLoc(rbuf)
		if err != nil {
			t.Errorf("DeserializeTxLoc #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&txLocBlock, test.out) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(&txLocBlock), spew.Sdump(test.out))
			continue
		}
		if !reflect.DeepEqual(txLocs, test.txLocs) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(txLocs), spew.Sdump(test.txLocs))
			continue
		}
	}
}
// TestBlockSerializeErrors performs negative tests against wire encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
	tests := []struct {
		in       *btcwire.MsgBlock // Value to encode
		buf      []byte            // Serialized data
		max      int               // Max size of fixed buffer to induce errors
		writeErr error             // Expected write error
		readErr  error             // Expected read error
	}{
		// Each max value is the byte offset within blockOneBytes at which
		// the fixed-size buffer is exhausted, truncating the named field.
		// Force error in version.
		{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block into a writer that fails after max bytes.
		w := newFixedWriter(test.max)
		err := test.in.Serialize(w)
		if err != test.writeErr {
			t.Errorf("Serialize #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Deserialize the block from a reader limited to max bytes.
		var block btcwire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = block.Deserialize(r)
		if err != test.readErr {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}

		// DeserializeTxLoc reads from a buffer truncated to the first
		// max bytes, which must fail the same way.
		var txLocBlock btcwire.MsgBlock
		rbuf := bytes.NewBuffer(test.buf[0:test.max])
		_, err = txLocBlock.DeserializeTxLoc(rbuf)
		if err != test.readErr {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}
	}
}
// TestBlockOverflowErrors performs tests to ensure deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// are handled properly.  This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
	// Use protocol version 70001 specifically here instead of the latest
	// protocol version because the test data is using bytes encoded with
	// that version.
	pver := uint32(70001)

	tests := []struct {
		buf  []byte // Wire encoding
		pver uint32 // Protocol version for wire encoding
		err  error  // Expected error
	}{
		// Block that claims to have ~uint64(0) transactions.  The trailing
		// 0xff-prefixed varint encodes the maximum uint64 value as the
		// transaction count.
		{
			[]byte{
				0x01, 0x00, 0x00, 0x00, // Version 1
				0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
				0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
				0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
				0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
				0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
				0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
				0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
				0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
				0x61, 0xbc, 0x66, 0x49, // Timestamp
				0xff, 0xff, 0x00, 0x1d, // Bits
				0x01, 0xe3, 0x62, 0x99, // Nonce
				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
				0xff, // TxnCount
			}, pver, &btcwire.MessageError{},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Decode from wire format.  Only the error TYPE is compared since
		// the exact message is not part of the contract here.
		var msg btcwire.MsgBlock
		r := bytes.NewBuffer(test.buf)
		err := msg.BtcDecode(r, test.pver)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize from wire format.
		r = bytes.NewBuffer(test.buf)
		err = msg.Deserialize(r)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize with transaction location info from wire format.
		r = bytes.NewBuffer(test.buf)
		_, err = msg.DeserializeTxLoc(r)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
				"want: %v", i, err, reflect.TypeOf(test.err))
			continue
		}
	}
}
// blockOne is the in-memory form of the test block.  It mirrors the
// serialized data in blockOneBytes and is used as the canonical input and
// expected output throughout the tests in this file.
var blockOne = btcwire.MsgBlock{
	Header: btcwire.BlockHeader{
		Version: 1,
		PrevBlock: btcwire.ShaHash([btcwire.HashSize]byte{ // Make go vet happy.
			0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
			0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
			0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
			0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
		}),
		MerkleRoot: btcwire.ShaHash([btcwire.HashSize]byte{ // Make go vet happy.
			0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
			0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
			0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
			0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e,
		}),

		Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
		Bits:      0x1d00ffff,               // 486604799
		Nonce:     0x9962e301,               // 2573394689
	},
	// A single coinbase-style transaction (all-zero previous output hash
	// with index 0xffffffff).
	Transactions: []*btcwire.MsgTx{
		{
			Version: 1,
			TxIn: []*btcwire.TxIn{
				{
					PreviousOutpoint: btcwire.OutPoint{
						Hash:  btcwire.ShaHash{},
						Index: 0xffffffff,
					},
					SignatureScript: []byte{
						0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04,
					},
					Sequence: 0xffffffff,
				},
			},
			TxOut: []*btcwire.TxOut{
				{
					Value: 0x12a05f200,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
						0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
						0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
						0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
						0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
						0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
						0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
						0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
						0xee, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
			},
			LockTime: 0,
		},
	},
}
// Block one serialized bytes.  Layout: 80-byte block header, a varint
// transaction count, then the serialized transaction.
var blockOneBytes = []byte{
	0x01, 0x00, 0x00, 0x00, // Version 1
	0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
	0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
	0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
	0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
	0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
	0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
	0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
	0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
	0x61, 0xbc, 0x66, 0x49, // Timestamp
	0xff, 0xff, 0x00, 0x1d, // Bits
	0x01, 0xe3, 0x62, 0x99, // Nonce
	0x01, // TxnCount
	0x01, 0x00, 0x00, 0x00, // Version
	0x01, // Varint for number of transaction inputs
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
	0xff, 0xff, 0xff, 0xff, // Previous output index
	0x07,                                     // Varint for length of signature script
	0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
	0xff, 0xff, 0xff, 0xff, // Sequence
	0x01,                                           // Varint for number of transaction outputs
	0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
	0x43, // Varint for length of pk script
	0x41, // OP_DATA_65
	0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
	0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
	0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
	0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
	0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
	0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
	0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
	0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
	0xee, // 65-byte signature
	0xac, // OP_CHECKSIG
	0x00, 0x00, 0x00, 0x00, // Lock time
}
// Transaction location information for block one transactions.  The first
// transaction starts at offset 81 (80-byte header + 1-byte TxnCount varint).
var blockOneTxLocs = []btcwire.TxLoc{
	{TxStart: 81, TxLen: 134},
}
Correct some comments in block bytes breakdown.
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcwire_test
import (
"bytes"
"github.com/conformal/btcwire"
"github.com/davecgh/go-spew/spew"
"io"
"reflect"
"testing"
"time"
)
// TestBlock tests the MsgBlock API: command name, max payload length,
// header construction, and adding/clearing transactions.
func TestBlock(t *testing.T) {
	pver := btcwire.ProtocolVersion

	// Block 1 header.
	prevHash := &blockOne.Header.PrevBlock
	merkleHash := &blockOne.Header.MerkleRoot
	bits := blockOne.Header.Bits
	nonce := blockOne.Header.Nonce
	bh := btcwire.NewBlockHeader(prevHash, merkleHash, bits, nonce)

	// Ensure the command is expected value.
	wantCmd := "block"
	msg := btcwire.NewMsgBlock(bh)
	if cmd := msg.Command(); cmd != wantCmd {
		t.Errorf("NewMsgBlock: wrong command - got %v want %v",
			cmd, wantCmd)
	}

	// Ensure max payload is expected value for latest protocol version.
	wantPayload := uint32(1000000)
	maxPayload := msg.MaxPayloadLength(pver)
	if maxPayload != wantPayload {
		t.Errorf("MaxPayloadLength: wrong max payload length for "+
			"protocol version %d - got %v, want %v", pver,
			maxPayload, wantPayload)
	}

	// Ensure we get the same block header data back out.
	if !reflect.DeepEqual(&msg.Header, bh) {
		t.Errorf("NewMsgBlock: wrong block header - got %v, want %v",
			spew.Sdump(&msg.Header), spew.Sdump(bh))
	}

	// Ensure transactions are added properly.  Copy so mutations to msg
	// can't affect the shared blockOne fixture.
	tx := blockOne.Transactions[0].Copy()
	msg.AddTransaction(tx)
	if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
		t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
			spew.Sdump(msg.Transactions),
			spew.Sdump(blockOne.Transactions))
	}

	// Ensure transactions are properly cleared.
	msg.ClearTransactions()
	if len(msg.Transactions) != 0 {
		t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
			len(msg.Transactions), 0)
	}
}
// TestBlockTxShas tests the ability to generate a slice of all transaction
// hashes from a block accurately.
func TestBlockTxShas(t *testing.T) {
	// Block 1, transaction 1 hash.
	hashStr := "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
	wantHash, err := btcwire.NewShaHashFromStr(hashStr)
	if err != nil {
		t.Errorf("NewShaHashFromStr: %v", err)
		return
	}

	wantShas := []btcwire.ShaHash{*wantHash}
	shas, err := blockOne.TxShas()
	if err != nil {
		t.Errorf("TxShas: %v", err)
		// Return to avoid a misleading second failure from comparing a
		// nil result below.
		return
	}
	if !reflect.DeepEqual(shas, wantShas) {
		t.Errorf("TxShas: wrong transaction hashes - got %v, want %v",
			spew.Sdump(shas), spew.Sdump(wantShas))
	}
}
// TestBlockSha tests the ability to generate the hash of a block accurately.
func TestBlockSha(t *testing.T) {
	// Block 1 hash.
	hashStr := "839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048"
	wantHash, err := btcwire.NewShaHashFromStr(hashStr)
	if err != nil {
		t.Errorf("NewShaHashFromStr: %v", err)
		// Return so a nil wantHash is never compared below (consistent
		// with the error handling in TestBlockTxShas).
		return
	}

	// Ensure the hash produced is expected.
	blockHash, err := blockOne.BlockSha()
	if err != nil {
		t.Errorf("BlockSha: %v", err)
		return
	}
	if !blockHash.IsEqual(wantHash) {
		t.Errorf("BlockSha: wrong hash - got %v, want %v",
			spew.Sprint(blockHash), spew.Sprint(wantHash))
	}
}
// TestBlockWire tests the MsgBlock wire encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockWire(t *testing.T) {
	tests := []struct {
		in     *btcwire.MsgBlock // Message to encode
		out    *btcwire.MsgBlock // Expected decoded message
		buf    []byte            // Wire encoding
		txLocs []btcwire.TxLoc   // Expected transaction locations
		pver   uint32            // Protocol version for wire encoding
	}{
		// The same block/bytes pair is exercised across every supported
		// protocol version since the block encoding is identical in each.
		// Latest protocol version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.ProtocolVersion,
		},

		// Protocol version BIP0035Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.BIP0035Version,
		},

		// Protocol version BIP0031Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.BIP0031Version,
		},

		// Protocol version NetAddressTimeVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.NetAddressTimeVersion,
		},

		// Protocol version MultipleAddressVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			btcwire.MultipleAddressVersion,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode the message to wire format.
		var buf bytes.Buffer
		err := test.in.BtcEncode(&buf, test.pver)
		if err != nil {
			t.Errorf("BtcEncode #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Decode the message from wire format.
		var msg btcwire.MsgBlock
		rbuf := bytes.NewBuffer(test.buf)
		err = msg.BtcDecode(rbuf, test.pver)
		if err != nil {
			t.Errorf("BtcDecode #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&msg, test.out) {
			t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
				spew.Sdump(&msg), spew.Sdump(test.out))
			continue
		}
	}
}
// TestBlockWireErrors performs negative tests against wire encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockWireErrors(t *testing.T) {
	// Use protocol version 60002 specifically here instead of the latest
	// because the test data is using bytes encoded with that protocol
	// version.
	pver := uint32(60002)

	tests := []struct {
		in       *btcwire.MsgBlock // Value to encode
		buf      []byte            // Wire encoding
		pver     uint32            // Protocol version for wire encoding
		max      int               // Max size of fixed buffer to induce errors
		writeErr error             // Expected write error
		readErr  error             // Expected read error
	}{
		// Each max value is the byte offset within blockOneBytes at which
		// the fixed-size buffer is exhausted, truncating the named field.
		// Force error in version.
		{&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, pver, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, pver, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, pver, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, pver, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, pver, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, pver, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode to wire format.  Errorf + continue (rather than Fatalf)
		// so the remaining cases still run after a failure.
		w := newFixedWriter(test.max)
		err := test.in.BtcEncode(w, test.pver)
		if err != test.writeErr {
			t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Decode from wire format.
		var msg btcwire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = msg.BtcDecode(r, test.pver)
		if err != test.readErr {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}
	}
}
// TestBlockSerialize tests MsgBlock serialize and deserialize.
func TestBlockSerialize(t *testing.T) {
	tests := []struct {
		in     *btcwire.MsgBlock // Message to encode
		out    *btcwire.MsgBlock // Expected decoded message
		buf    []byte            // Serialized data
		txLocs []btcwire.TxLoc   // Expected transaction locations
	}{
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block and compare against the known-good bytes.
		var buf bytes.Buffer
		err := test.in.Serialize(&buf)
		if err != nil {
			t.Errorf("Serialize #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("Serialize #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Deserialize the block and ensure the round trip reproduces the
		// original message.
		var block btcwire.MsgBlock
		rbuf := bytes.NewBuffer(test.buf)
		err = block.Deserialize(rbuf)
		if err != nil {
			t.Errorf("Deserialize #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&block, test.out) {
			t.Errorf("Deserialize #%d\n got: %s want: %s", i,
				spew.Sdump(&block), spew.Sdump(test.out))
			continue
		}

		// Deserialize the block while gathering transaction location
		// information.  Both the decoded block and the reported locations
		// must match expectations.
		var txLocBlock btcwire.MsgBlock
		rbuf = bytes.NewBuffer(test.buf)
		txLocs, err := txLocBlock.DeserializeTxLoc(rbuf)
		if err != nil {
			t.Errorf("DeserializeTxLoc #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&txLocBlock, test.out) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(&txLocBlock), spew.Sdump(test.out))
			continue
		}
		if !reflect.DeepEqual(txLocs, test.txLocs) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(txLocs), spew.Sdump(test.txLocs))
			continue
		}
	}
}
// TestBlockSerializeErrors performs negative tests against wire encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
	tests := []struct {
		in       *btcwire.MsgBlock // Value to encode
		buf      []byte            // Serialized data
		max      int               // Max size of fixed buffer to induce errors
		writeErr error             // Expected write error
		readErr  error             // Expected read error
	}{
		// Each max value is the byte offset within blockOneBytes at which
		// the fixed-size buffer is exhausted, truncating the named field.
		// Force error in version.
		{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block into a writer that fails after max bytes.
		w := newFixedWriter(test.max)
		err := test.in.Serialize(w)
		if err != test.writeErr {
			t.Errorf("Serialize #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Deserialize the block from a reader limited to max bytes.
		var block btcwire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = block.Deserialize(r)
		if err != test.readErr {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}

		// DeserializeTxLoc reads from a buffer truncated to the first
		// max bytes, which must fail the same way.
		var txLocBlock btcwire.MsgBlock
		rbuf := bytes.NewBuffer(test.buf[0:test.max])
		_, err = txLocBlock.DeserializeTxLoc(rbuf)
		if err != test.readErr {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}
	}
}
// TestBlockOverflowErrors performs tests to ensure deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// are handled properly.  This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
	// Use protocol version 70001 specifically here instead of the latest
	// protocol version because the test data is using bytes encoded with
	// that version.
	pver := uint32(70001)

	tests := []struct {
		buf  []byte // Wire encoding
		pver uint32 // Protocol version for wire encoding
		err  error  // Expected error
	}{
		// Block that claims to have ~uint64(0) transactions.  The trailing
		// 0xff-prefixed varint encodes the maximum uint64 value as the
		// transaction count.
		{
			[]byte{
				0x01, 0x00, 0x00, 0x00, // Version 1
				0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
				0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
				0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
				0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
				0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
				0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
				0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
				0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
				0x61, 0xbc, 0x66, 0x49, // Timestamp
				0xff, 0xff, 0x00, 0x1d, // Bits
				0x01, 0xe3, 0x62, 0x99, // Nonce
				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
				0xff, // TxnCount
			}, pver, &btcwire.MessageError{},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Decode from wire format.  Only the error TYPE is compared since
		// the exact message is not part of the contract here.
		var msg btcwire.MsgBlock
		r := bytes.NewBuffer(test.buf)
		err := msg.BtcDecode(r, test.pver)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize from wire format.
		r = bytes.NewBuffer(test.buf)
		err = msg.Deserialize(r)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize with transaction location info from wire format.
		r = bytes.NewBuffer(test.buf)
		_, err = msg.DeserializeTxLoc(r)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
				"want: %v", i, err, reflect.TypeOf(test.err))
			continue
		}
	}
}
// blockOne is the in-memory form of the test block.  It mirrors the
// serialized data in blockOneBytes and is used as the canonical input and
// expected output throughout the tests in this file.
var blockOne = btcwire.MsgBlock{
	Header: btcwire.BlockHeader{
		Version: 1,
		PrevBlock: btcwire.ShaHash([btcwire.HashSize]byte{ // Make go vet happy.
			0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
			0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
			0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
			0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
		}),
		MerkleRoot: btcwire.ShaHash([btcwire.HashSize]byte{ // Make go vet happy.
			0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
			0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
			0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
			0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e,
		}),

		Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
		Bits:      0x1d00ffff,               // 486604799
		Nonce:     0x9962e301,               // 2573394689
	},
	// A single coinbase-style transaction (all-zero previous output hash
	// with index 0xffffffff).
	Transactions: []*btcwire.MsgTx{
		{
			Version: 1,
			TxIn: []*btcwire.TxIn{
				{
					PreviousOutpoint: btcwire.OutPoint{
						Hash:  btcwire.ShaHash{},
						Index: 0xffffffff,
					},
					SignatureScript: []byte{
						0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04,
					},
					Sequence: 0xffffffff,
				},
			},
			TxOut: []*btcwire.TxOut{
				{
					Value: 0x12a05f200,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
						0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
						0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
						0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
						0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
						0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
						0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
						0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
						0xee, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
			},
			LockTime: 0,
		},
	},
}
// Block one serialized bytes.  Layout: 80-byte block header, a varint
// transaction count, then the serialized transaction.
var blockOneBytes = []byte{
	0x01, 0x00, 0x00, 0x00, // Version 1
	0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
	0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
	0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
	0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
	0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
	0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
	0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
	0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
	0x61, 0xbc, 0x66, 0x49, // Timestamp
	0xff, 0xff, 0x00, 0x1d, // Bits
	0x01, 0xe3, 0x62, 0x99, // Nonce
	0x01, // TxnCount
	0x01, 0x00, 0x00, 0x00, // Version
	0x01, // Varint for number of transaction inputs
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
	0xff, 0xff, 0xff, 0xff, // Previous output index
	0x07,                                     // Varint for length of signature script
	0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
	0xff, 0xff, 0xff, 0xff, // Sequence
	0x01,                                           // Varint for number of transaction outputs
	0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
	0x43, // Varint for length of pk script
	0x41, // OP_DATA_65
	0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
	0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
	0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
	0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
	0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
	0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
	0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
	0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
	0xee, // 65-byte signature
	0xac, // OP_CHECKSIG
	0x00, 0x00, 0x00, 0x00, // Lock time
}
// Transaction location information for block one transactions.  The first
// transaction starts at offset 81 (80-byte header + 1-byte TxnCount varint).
var blockOneTxLocs = []btcwire.TxLoc{
	{TxStart: 81, TxLen: 134},
}
|
package amazonec2
import (
"errors"
)
// region holds the per-region details needed to launch an instance.
type region struct {
	// AmiId is the machine image identifier to use in this region.
	AmiId string
}
// Ubuntu 16.04 LTS 20180228.1 hvm:ebs-ssd (amd64)
// See https://cloud-images.ubuntu.com/locator/ec2/
//
// regionDetails maps an AWS region name to its default AMI.  The
// "custom-endpoint" entry intentionally has an empty AMI id.
var regionDetails map[string]*region = map[string]*region{
	"ap-northeast-1":  {"ami-bcb7f6da"},
	"ap-northeast-2":  {"ami-5073de3e"},
	"ap-southeast-1":  {"ami-41e4af3d"},
	"ap-southeast-2":  {"ami-c1498fa3"},
	"ap-south-1":      {"ami-1083dc7f"},
	"ca-central-1":    {"ami-8d9e19e9"},
	"cn-north-1":      {"ami-cc4499a1"}, // Note: this is 20180126
	"cn-northwest-1":  {"ami-fd0e1a9f"}, // Note: this is 20180126
	"eu-central-1":    {"ami-bc4925d3"},
	"eu-west-1":       {"ami-0b541372"},
	"eu-west-2":       {"ami-ff46a298"},
	"eu-west-3":       {"ami-9465d3e9"},
	"sa-east-1":       {"ami-b5501bd9"},
	"us-east-1":       {"ami-927185ef"},
	"us-east-2":       {"ami-b9daeddc"},
	"us-west-1":       {"ami-264c4646"},
	"us-west-2":       {"ami-78a22900"},
	"us-gov-west-1":   {"ami-2561ea44"},
	"custom-endpoint": {""},
}
// awsRegionsList returns the names of all known regions (the keys of
// regionDetails).  Order is not deterministic because Go map iteration
// order is randomized.
func awsRegionsList() []string {
	// Pre-size the slice to avoid repeated growth during append.
	list := make([]string, 0, len(regionDetails))
	for k := range regionDetails {
		list = append(list, k)
	}

	return list
}
func validateAwsRegion(region string) (string, error) {
for _, v := range awsRegionsList() {
if v == region {
return region, nil
}
}
return "", errors.New("Invalid region specified")
}
Add eu-north-1 to AMIs
package amazonec2
import (
"errors"
)
// region holds the per-region details needed to launch an instance.
type region struct {
	// AmiId is the machine image identifier to use in this region.
	AmiId string
}
// Ubuntu 16.04 LTS 20180228.1 hvm:ebs-ssd (amd64)
// See https://cloud-images.ubuntu.com/locator/ec2/
//
// regionDetails maps an AWS region name to its default AMI.  The
// "custom-endpoint" entry intentionally has an empty AMI id.
var regionDetails map[string]*region = map[string]*region{
	"ap-northeast-1":  {"ami-bcb7f6da"},
	"ap-northeast-2":  {"ami-5073de3e"},
	"ap-southeast-1":  {"ami-41e4af3d"},
	"ap-southeast-2":  {"ami-c1498fa3"},
	"ap-south-1":      {"ami-1083dc7f"},
	"ca-central-1":    {"ami-8d9e19e9"},
	"cn-north-1":      {"ami-cc4499a1"}, // Note: this is 20180126
	"cn-northwest-1":  {"ami-fd0e1a9f"}, // Note: this is 20180126
	"eu-central-1":    {"ami-bc4925d3"},
	"eu-north-1":      {"ami-017ff17f"},
	"eu-west-1":       {"ami-0b541372"},
	"eu-west-2":       {"ami-ff46a298"},
	"eu-west-3":       {"ami-9465d3e9"},
	"sa-east-1":       {"ami-b5501bd9"},
	"us-east-1":       {"ami-927185ef"},
	"us-east-2":       {"ami-b9daeddc"},
	"us-west-1":       {"ami-264c4646"},
	"us-west-2":       {"ami-78a22900"},
	"us-gov-west-1":   {"ami-2561ea44"},
	"custom-endpoint": {""},
}
// awsRegionsList returns the names of all known regions (the keys of
// regionDetails).  Order is not deterministic because Go map iteration
// order is randomized.
func awsRegionsList() []string {
	// Pre-size the slice to avoid repeated growth during append.
	list := make([]string, 0, len(regionDetails))
	for k := range regionDetails {
		list = append(list, k)
	}

	return list
}
func validateAwsRegion(region string) (string, error) {
for _, v := range awsRegionsList() {
if v == region {
return region, nil
}
}
return "", errors.New("Invalid region specified")
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"http"
"io"
"image"
"image/jpeg"
"image/png"
"json"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"camli/blobref"
"camli/blobserver"
"camli/httputil"
"camli/jsonconfig"
"camli/misc/resize"
"camli/schema"
)
// Keep the log import referenced even when no other code in this file
// logs.
var _ = log.Printf

// staticFilePattern matches the bare file names ServeHTTP is willing to
// serve from FilesDir (html/js/css/png/jpg/gif only — no path separators).
var staticFilePattern = regexp.MustCompile(`^([a-zA-Z0-9\-\_]+\.(html|js|css|png|jpg|gif))$`)

// identPattern restricts the JSONP callback name accepted by
// serveDiscovery to a plain identifier.
var identPattern = regexp.MustCompile(`^[a-zA-Z\_]+$`)

// Download URL suffix:
//   $1: blobref (checked in download handler)
//   $2: optional "/filename" to be sent as recommended download name,
//       if sane looking
var downloadPattern = regexp.MustCompile(`^download/([^/]+)(/.*)?$`)

// thumbnailPattern mirrors downloadPattern for the thumbnail endpoint.
var thumbnailPattern = regexp.MustCompile(`^thumbnail/([^/]+)(/.*)?$`)
// UIHandler handles serving the UI and discovery JSON.
type UIHandler struct {
	// URL prefixes (path or full URL) to the primary blob and
	// search root.  Only used by the UI and thus necessary if UI
	// is true.
	BlobRoot     string
	SearchRoot   string
	JSONSignRoot string

	// FilesDir is the directory the static UI files are served from.
	FilesDir string

	Storage blobserver.Storage // of BlobRoot
	Cache   blobserver.Storage // or nil
}
func defaultFilesDir() string {
dir, _ := filepath.Split(os.Args[0])
return filepath.Join(dir, "ui")
}
// init registers the "ui" handler type so configuration files can
// instantiate UIHandler through blobserver's constructor registry.
func init() {
	blobserver.RegisterHandlerConstructor("ui", newUiFromConfig)
}
// newUiFromConfig constructs the UI handler from its JSON configuration.
// It validates the config keys, checks that the optional search and
// json-sign prefixes refer to handlers of the right type, resolves the
// optional blob root and cache storage prefixes via the loader, and
// verifies the static files directory exists.
func newUiFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err os.Error) {
	ui := &UIHandler{}
	ui.BlobRoot = conf.OptionalString("blobRoot", "")
	ui.SearchRoot = conf.OptionalString("searchRoot", "")
	ui.JSONSignRoot = conf.OptionalString("jsonSignRoot", "")
	cachePrefix := conf.OptionalString("cache", "")
	ui.FilesDir = conf.OptionalString("staticFiles", defaultFilesDir())
	if err = conf.Validate(); err != nil {
		return
	}

	// checkType verifies that an optional prefix key, when set, refers
	// to an existing handler of the expected type.  Problems are
	// reported through the named return err.
	checkType := func(key string, htype string) {
		v := conf.OptionalString(key, "")
		if v == "" {
			return
		}
		ct := ld.GetHandlerType(v)
		if ct == "" {
			err = fmt.Errorf("UI handler's %q references non-existent %q", key, v)
		} else if ct != htype {
			err = fmt.Errorf("UI handler's %q references %q of type %q; expected type %q", key, v,
				ct, htype)
		}
	}
	checkType("searchRoot", "search")
	checkType("jsonSignRoot", "jsonsign")
	if err != nil {
		return
	}

	if ui.BlobRoot != "" {
		bs, err := ld.GetStorage(ui.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("UI handler's blobRoot of %q error: %v", ui.BlobRoot, err)
		}
		ui.Storage = bs
	}

	if cachePrefix != "" {
		bs, err := ld.GetStorage(cachePrefix)
		if err != nil {
			// Fix: report the cache prefix that failed to resolve; the
			// previous message mistakenly printed ui.BlobRoot here.
			return nil, fmt.Errorf("UI handler's cache of %q error: %v", cachePrefix, err)
		}
		ui.Cache = bs
	}

	fi, sterr := os.Stat(ui.FilesDir)
	if sterr != nil || !fi.IsDirectory() {
		err = fmt.Errorf("UI handler's \"staticFiles\" of %q is invalid", ui.FilesDir)
		return
	}
	return ui, nil
}
// camliMode returns the value of the "camli.mode" query parameter of the
// request, or the empty string when it is absent or the query string
// cannot be parsed.
func camliMode(req *http.Request) string {
	// TODO-GO: this is too hard to get at the GET Query args on a
	// POST request.
	m, err := http.ParseQuery(req.URL.RawQuery)
	if err != nil {
		// A malformed query string is treated the same as no mode.
		return ""
	}
	if mode, ok := m["camli.mode"]; ok && len(mode) > 0 {
		return mode[0]
	}
	return ""
}
// wantsDiscovery reports whether the request is a GET asking for the
// discovery configuration, either via the Accept header or the
// camli.mode=config query parameter.
func wantsDiscovery(req *http.Request) bool {
	return req.Method == "GET" &&
		(req.Header.Get("Accept") == "text/x-camli-configuration" ||
			camliMode(req) == "config")
}
// wantsUploadHelper reports whether the request is a POST targeting the
// upload-helper endpoint (camli.mode=uploadhelper).
func wantsUploadHelper(req *http.Request) bool {
	return req.Method == "POST" && camliMode(req) == "uploadhelper"
}
// wantsPermanode reports whether the request is a GET with a parseable
// blobref in the "p" form value (the permanode page).
func wantsPermanode(req *http.Request) bool {
	return req.Method == "GET" && blobref.Parse(req.FormValue("p")) != nil
}
// wantsBlobInfo reports whether the request is a GET with a parseable
// blobref in the "b" form value (the blob info page).
func wantsBlobInfo(req *http.Request) bool {
	return req.Method == "GET" && blobref.Parse(req.FormValue("b")) != nil
}
// ServeHTTP routes UI requests to the discovery JSON, the upload
// helper, downloads, thumbnails, or a static file under FilesDir.
func (ui *UIHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// Path base and suffix are supplied by the enclosing prefix handler.
base := req.Header.Get("X-PrefixHandler-PathBase")
suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
rw.Header().Set("Vary", "Accept")
switch {
case wantsDiscovery(req):
ui.serveDiscovery(rw, req)
case wantsUploadHelper(req):
ui.serveUploadHelper(rw, req)
case strings.HasPrefix(suffix, "download/"):
ui.serveDownload(rw, req)
case strings.HasPrefix(suffix, "thumbnail/"):
ui.serveThumbnail(rw, req)
default:
file := ""
if m := staticFilePattern.FindStringSubmatch(suffix); m != nil {
file = m[1]
} else {
// Not a static asset; choose an HTML page by query params.
switch {
case wantsPermanode(req):
file = "permanode.html"
case wantsBlobInfo(req):
file = "blobinfo.html"
case req.URL.Path == base:
file = "index.html"
default:
http.Error(rw, "Illegal URL.", 404)
return
}
}
http.ServeFile(rw, req, filepath.Join(ui.FilesDir, file))
}
}
// serveDiscovery writes the discovery configuration as JSON, wrapped
// as JSONP when a valid "cb" callback parameter is given.
func (ui *UIHandler) serveDiscovery(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "text/javascript")
inCb := false
// Only accept identifier-shaped callback names, to avoid script injection.
if cb := req.FormValue("cb"); identPattern.MatchString(cb) {
fmt.Fprintf(rw, "%s(", cb)
inCb = true
}
// Marshal error ignored: the map holds only strings.
bytes, _ := json.Marshal(map[string]interface{}{
"blobRoot": ui.BlobRoot,
"searchRoot": ui.SearchRoot,
"jsonSignRoot": ui.JSONSignRoot,
"uploadHelper": "?camli.mode=uploadhelper", // hack; remove with better javascript
"downloadHelper": "./download/",
})
rw.Write(bytes)
if inCb {
rw.Write([]byte{')'})
}
}
// storageSeekFetcher adapts the configured streaming blob storage into
// a seekable fetcher, as required by schema.NewFileReader.
func (ui *UIHandler) storageSeekFetcher() (blobref.SeekFetcher, os.Error) {
return blobref.SeekerFromStreamingFetcher(ui.Storage)
}
// serveDownload streams the file named by the blobref in the
// "download/<blobref>[/filename]" URL suffix. A HEAD request with a
// "verifycontents" blobref hashes the content instead and reports a
// match via the X-Camli-Contents response header.
func (ui *UIHandler) serveDownload(rw http.ResponseWriter, req *http.Request) {
if req.Method != "GET" && req.Method != "HEAD" {
http.Error(rw, "Invalid download method", 400)
return
}
if ui.Storage == nil {
http.Error(rw, "No BlobRoot configured", 500)
return
}
fetchSeeker, err := ui.storageSeekFetcher()
if err != nil {
http.Error(rw, err.String(), 500)
return
}
suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
m := downloadPattern.FindStringSubmatch(suffix)
if m == nil {
httputil.ErrorRouting(rw, req)
return
}
fbr := blobref.Parse(m[1])
if fbr == nil {
http.Error(rw, "Invalid blobref", 400)
return
}
filename := m[2]
if len(filename) > 0 {
filename = filename[1:] // remove leading slash
}
fr, err := schema.NewFileReader(fetchSeeker, fbr)
if err != nil {
http.Error(rw, "Can't serve file: "+err.String(), 500)
return
}
defer fr.Close()
// TODO: fr.FileSchema() and guess a mime type? For now:
schema := fr.FileSchema()
rw.Header().Set("Content-Type", "application/octet-stream")
rw.Header().Set("Content-Length", fmt.Sprintf("%d", schema.Size))
if req.Method == "HEAD" {
vbr := blobref.Parse(req.FormValue("verifycontents"))
if vbr == nil {
return
}
hash := vbr.Hash()
if hash == nil {
return
}
io.Copy(hash, fr) // ignore errors, caught later
if vbr.HashMatches(hash) {
rw.Header().Set("X-Camli-Contents", vbr.String())
}
return
}
n, err := io.Copy(rw, fr)
log.Printf("For %q request of %s: copied %d, %v", req.Method, req.URL.RawPath, n, err)
if err != nil {
log.Printf("error serving download of file schema %s: %v", fbr, err)
return
}
if n != int64(schema.Size) {
log.Printf("error serving download of file schema %s: sent %d, expected size of %d",
fbr, n, schema.Size)
return
}
}
// serveThumbnail serves a scaled-down version of the image whose file
// schema blob is named in the "thumbnail/<blobref>[/filename]" URL
// suffix. Maximum dimensions come from the "mw" (max width) and "mh"
// (max height) query parameters. Only downscaling is performed; images
// already within bounds are re-encoded and served at original size.
func (ui *UIHandler) serveThumbnail(rw http.ResponseWriter, req *http.Request) {
	if ui.Storage == nil {
		http.Error(rw, "No BlobRoot configured", 500)
		return
	}
	fetchSeeker, err := ui.storageSeekFetcher()
	if err != nil {
		http.Error(rw, err.String(), 500)
		return
	}
	// Use the prefix-stripped suffix set by the prefix handler;
	// req.URL.Path still carries the handler prefix and would never
	// match thumbnailPattern (which is anchored at "thumbnail/").
	suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
	m := thumbnailPattern.FindStringSubmatch(suffix)
	if m == nil {
		httputil.ErrorRouting(rw, req)
		return
	}
	query := req.URL.Query()
	width, err := strconv.Atoi(query.Get("mw"))
	if err != nil {
		http.Error(rw, "Invalid specified max width 'mw': "+err.String(), 500)
		return
	}
	// The max-height parameter is "mh" (was incorrectly read as "my").
	height, err := strconv.Atoi(query.Get("mh"))
	if err != nil {
		http.Error(rw, "Invalid specified height 'mh': "+err.String(), 500)
		return
	}
	blobref := blobref.Parse(m[1])
	if blobref == nil {
		http.Error(rw, "Invalid blobref", 400)
		return
	}
	filename := m[2]
	if len(filename) > 0 {
		filename = filename[1:] // remove leading slash
	}
	fr, err := schema.NewFileReader(fetchSeeker, blobref)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	// Close the file reader (serveDownload does the same); otherwise
	// the reader is leaked on every thumbnail request.
	defer fr.Close()
	var buf bytes.Buffer
	n, err := io.Copy(&buf, fr)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	i, format, err := image.Decode(&buf)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	b := i.Bounds()
	// only do downscaling, otherwise just serve the original image
	if width < b.Dx() || height < b.Dy() {
		const huge = 2400
		// If it's gigantic, it's more efficient to downsample first
		// and then resize; resizing will smooth out the roughness.
		// (trusting the moustachio guys on that one).
		if b.Dx() > huge || b.Dy() > huge {
			w, h := width * 2, height * 2
			if b.Dx() > b.Dy() {
				w = b.Dx() * h / b.Dy()
			} else {
				h = b.Dy() * w / b.Dx()
			}
			i = resize.Resample(i, i.Bounds(), w, h)
			b = i.Bounds()
		}
		// conserve proportions. use the smallest of the two as the decisive one.
		if width > height {
			width = b.Dx() * height / b.Dy()
		} else {
			height = b.Dy() * width / b.Dx()
		}
		i = resize.Resize(i, b, width, height)
		// Encode as a new image
		buf.Reset()
		switch format {
		case "jpeg":
			err = jpeg.Encode(&buf, i, nil)
		default:
			err = png.Encode(&buf, i)
		}
		if err != nil {
			http.Error(rw, "Can't serve file: "+err.String(), 500)
			return
		}
	}
	ct := ""
	switch format {
	case "jpeg":
		ct = "image/jpeg"
	default:
		ct = "image/png"
	}
	rw.Header().Set("Content-Type", ct)
	size := buf.Len()
	rw.Header().Set("Content-Length", fmt.Sprintf("%d", size))
	n, err = io.Copy(rw, &buf)
	if err != nil {
		log.Printf("error serving thumbnail of file schema %s: %v", blobref, err)
		return
	}
	if n != int64(size) {
		log.Printf("error serving thumbnail of file schema %s: sent %d, expected size of %d",
			blobref, n, size)
		return
	}
}
Fix thumbnail handler
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"http"
"io"
"image"
"image/jpeg"
"image/png"
"json"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"camli/blobref"
"camli/blobserver"
"camli/httputil"
"camli/jsonconfig"
"camli/misc/resize"
"camli/schema"
)
var _ = log.Printf
// staticFilePattern matches bare static asset filenames served out of FilesDir.
var staticFilePattern = regexp.MustCompile(`^([a-zA-Z0-9\-\_]+\.(html|js|css|png|jpg|gif))$`)
// identPattern validates the JSONP callback name in discovery requests.
var identPattern = regexp.MustCompile(`^[a-zA-Z\_]+$`)
// Download URL suffix:
// $1: blobref (checked in download handler)
// $2: optional "/filename" to be sent as recommended download name,
// if sane looking
var downloadPattern = regexp.MustCompile(`^download/([^/]+)(/.*)?$`)
// thumbnailPattern mirrors downloadPattern for the thumbnail endpoint.
var thumbnailPattern = regexp.MustCompile(`^thumbnail/([^/]+)(/.*)?$`)
// UIHandler handles serving the UI and discovery JSON.
type UIHandler struct {
// URL prefixes (path or full URL) to the primary blob and
// search root. Only used by the UI and thus necessary if UI
// is true.
BlobRoot string
SearchRoot string
JSONSignRoot string
// FilesDir is the directory holding the static UI assets (HTML/JS/CSS).
FilesDir string
Storage blobserver.Storage // of BlobRoot
Cache blobserver.Storage // or nil
}
// defaultFilesDir returns the "ui" directory next to the running binary.
func defaultFilesDir() string {
dir, _ := filepath.Split(os.Args[0])
return filepath.Join(dir, "ui")
}
// init registers this handler under the "ui" handler type.
func init() {
blobserver.RegisterHandlerConstructor("ui", newUiFromConfig)
}
// newUiFromConfig builds the UI handler from its JSON configuration,
// resolving the blob root storage, optional cache storage, and the
// static files directory. It returns an error if referenced handlers
// are missing or of the wrong type, or if FilesDir is not a directory.
func newUiFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err os.Error) {
	ui := &UIHandler{}
	ui.BlobRoot = conf.OptionalString("blobRoot", "")
	ui.SearchRoot = conf.OptionalString("searchRoot", "")
	ui.JSONSignRoot = conf.OptionalString("jsonSignRoot", "")
	cachePrefix := conf.OptionalString("cache", "")
	ui.FilesDir = conf.OptionalString("staticFiles", defaultFilesDir())
	if err = conf.Validate(); err != nil {
		return
	}
	// checkType sets the outer err if the handler named by config key
	// doesn't exist or has the wrong handler type.
	checkType := func(key string, htype string) {
		v := conf.OptionalString(key, "")
		if v == "" {
			return
		}
		ct := ld.GetHandlerType(v)
		if ct == "" {
			err = fmt.Errorf("UI handler's %q references non-existent %q", key, v)
		} else if ct != htype {
			err = fmt.Errorf("UI handler's %q references %q of type %q; expected type %q", key, v,
				ct, htype)
		}
	}
	checkType("searchRoot", "search")
	checkType("jsonSignRoot", "jsonsign")
	if err != nil {
		return
	}
	if ui.BlobRoot != "" {
		bs, err := ld.GetStorage(ui.BlobRoot)
		if err != nil {
			return nil, fmt.Errorf("UI handler's blobRoot of %q error: %v", ui.BlobRoot, err)
		}
		ui.Storage = bs
	}
	if cachePrefix != "" {
		bs, err := ld.GetStorage(cachePrefix)
		if err != nil {
			// Report the cache prefix here, not ui.BlobRoot, which is a
			// different (possibly empty) config value.
			return nil, fmt.Errorf("UI handler's cache of %q error: %v", cachePrefix, err)
		}
		ui.Cache = bs
	}
	fi, sterr := os.Stat(ui.FilesDir)
	if sterr != nil || !fi.IsDirectory() {
		err = fmt.Errorf("UI handler's \"staticFiles\" of %q is invalid", ui.FilesDir)
		return
	}
	return ui, nil
}
// camliMode returns the value of the "camli.mode" query parameter,
// or "" if the query string is absent or unparseable.
func camliMode(req *http.Request) string {
// TODO-GO: this is too hard to get at the GET Query args on a
// POST request.
m, err := http.ParseQuery(req.URL.RawQuery)
if err != nil {
return ""
}
if mode, ok := m["camli.mode"]; ok && len(mode) > 0 {
return mode[0]
}
return ""
}
// wantsDiscovery reports whether the request asks for the discovery
// (configuration) JSON, via the Accept header or camli.mode=config.
func wantsDiscovery(req *http.Request) bool {
return req.Method == "GET" &&
(req.Header.Get("Accept") == "text/x-camli-configuration" ||
camliMode(req) == "config")
}
// wantsUploadHelper reports whether the request is a POST to the
// upload-helper endpoint (camli.mode=uploadhelper).
func wantsUploadHelper(req *http.Request) bool {
return req.Method == "POST" && camliMode(req) == "uploadhelper"
}
// wantsPermanode reports whether the request names a permanode via a
// parseable "p" blobref form value.
func wantsPermanode(req *http.Request) bool {
return req.Method == "GET" && blobref.Parse(req.FormValue("p")) != nil
}
// wantsBlobInfo reports whether the request names a blob via a
// parseable "b" blobref form value.
func wantsBlobInfo(req *http.Request) bool {
return req.Method == "GET" && blobref.Parse(req.FormValue("b")) != nil
}
// ServeHTTP routes UI requests to the discovery JSON, the upload
// helper, downloads, thumbnails, or a static file under FilesDir.
func (ui *UIHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// Path base and suffix are supplied by the enclosing prefix handler.
base := req.Header.Get("X-PrefixHandler-PathBase")
suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
rw.Header().Set("Vary", "Accept")
switch {
case wantsDiscovery(req):
ui.serveDiscovery(rw, req)
case wantsUploadHelper(req):
ui.serveUploadHelper(rw, req)
case strings.HasPrefix(suffix, "download/"):
ui.serveDownload(rw, req)
case strings.HasPrefix(suffix, "thumbnail/"):
ui.serveThumbnail(rw, req)
default:
file := ""
if m := staticFilePattern.FindStringSubmatch(suffix); m != nil {
file = m[1]
} else {
// Not a static asset; choose an HTML page by query params.
switch {
case wantsPermanode(req):
file = "permanode.html"
case wantsBlobInfo(req):
file = "blobinfo.html"
case req.URL.Path == base:
file = "index.html"
default:
http.Error(rw, "Illegal URL.", 404)
return
}
}
http.ServeFile(rw, req, filepath.Join(ui.FilesDir, file))
}
}
// serveDiscovery writes the discovery configuration as JSON, wrapped
// as JSONP when a valid "cb" callback parameter is given.
func (ui *UIHandler) serveDiscovery(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "text/javascript")
inCb := false
// Only accept identifier-shaped callback names, to avoid script injection.
if cb := req.FormValue("cb"); identPattern.MatchString(cb) {
fmt.Fprintf(rw, "%s(", cb)
inCb = true
}
// Marshal error ignored: the map holds only strings.
bytes, _ := json.Marshal(map[string]interface{}{
"blobRoot": ui.BlobRoot,
"searchRoot": ui.SearchRoot,
"jsonSignRoot": ui.JSONSignRoot,
"uploadHelper": "?camli.mode=uploadhelper", // hack; remove with better javascript
"downloadHelper": "./download/",
})
rw.Write(bytes)
if inCb {
rw.Write([]byte{')'})
}
}
// storageSeekFetcher adapts the configured streaming blob storage into
// a seekable fetcher, as required by schema.NewFileReader.
func (ui *UIHandler) storageSeekFetcher() (blobref.SeekFetcher, os.Error) {
return blobref.SeekerFromStreamingFetcher(ui.Storage)
}
// serveDownload streams the file named by the blobref in the
// "download/<blobref>[/filename]" URL suffix. A HEAD request with a
// "verifycontents" blobref hashes the content instead and reports a
// match via the X-Camli-Contents response header.
func (ui *UIHandler) serveDownload(rw http.ResponseWriter, req *http.Request) {
if req.Method != "GET" && req.Method != "HEAD" {
http.Error(rw, "Invalid download method", 400)
return
}
if ui.Storage == nil {
http.Error(rw, "No BlobRoot configured", 500)
return
}
fetchSeeker, err := ui.storageSeekFetcher()
if err != nil {
http.Error(rw, err.String(), 500)
return
}
suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
m := downloadPattern.FindStringSubmatch(suffix)
if m == nil {
httputil.ErrorRouting(rw, req)
return
}
fbr := blobref.Parse(m[1])
if fbr == nil {
http.Error(rw, "Invalid blobref", 400)
return
}
filename := m[2]
if len(filename) > 0 {
filename = filename[1:] // remove leading slash
}
fr, err := schema.NewFileReader(fetchSeeker, fbr)
if err != nil {
http.Error(rw, "Can't serve file: "+err.String(), 500)
return
}
defer fr.Close()
// TODO: fr.FileSchema() and guess a mime type? For now:
schema := fr.FileSchema()
rw.Header().Set("Content-Type", "application/octet-stream")
rw.Header().Set("Content-Length", fmt.Sprintf("%d", schema.Size))
if req.Method == "HEAD" {
vbr := blobref.Parse(req.FormValue("verifycontents"))
if vbr == nil {
return
}
hash := vbr.Hash()
if hash == nil {
return
}
io.Copy(hash, fr) // ignore errors, caught later
if vbr.HashMatches(hash) {
rw.Header().Set("X-Camli-Contents", vbr.String())
}
return
}
n, err := io.Copy(rw, fr)
log.Printf("For %q request of %s: copied %d, %v", req.Method, req.URL.RawPath, n, err)
if err != nil {
log.Printf("error serving download of file schema %s: %v", fbr, err)
return
}
if n != int64(schema.Size) {
log.Printf("error serving download of file schema %s: sent %d, expected size of %d",
fbr, n, schema.Size)
return
}
}
// serveThumbnail serves a scaled-down version of the image whose file
// schema blob is named in the "thumbnail/<blobref>[/filename]" URL
// suffix. Maximum dimensions come from the "mw" (max width) and "mh"
// (max height) query parameters. Only downscaling is performed; images
// already within bounds are re-encoded and served at original size.
func (ui *UIHandler) serveThumbnail(rw http.ResponseWriter, req *http.Request) {
	if ui.Storage == nil {
		http.Error(rw, "No BlobRoot configured", 500)
		return
	}
	fetchSeeker, err := ui.storageSeekFetcher()
	if err != nil {
		http.Error(rw, err.String(), 500)
		return
	}
	suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
	m := thumbnailPattern.FindStringSubmatch(suffix)
	if m == nil {
		httputil.ErrorRouting(rw, req)
		return
	}
	query := req.URL.Query()
	width, err := strconv.Atoi(query.Get("mw"))
	if err != nil {
		http.Error(rw, "Invalid specified max width 'mw': "+err.String(), 500)
		return
	}
	height, err := strconv.Atoi(query.Get("mh"))
	if err != nil {
		http.Error(rw, "Invalid specified height 'mh': "+err.String(), 500)
		return
	}
	blobref := blobref.Parse(m[1])
	if blobref == nil {
		http.Error(rw, "Invalid blobref", 400)
		return
	}
	filename := m[2]
	if len(filename) > 0 {
		filename = filename[1:] // remove leading slash
	}
	fr, err := schema.NewFileReader(fetchSeeker, blobref)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	// Close the file reader (serveDownload does the same); otherwise
	// the reader is leaked on every thumbnail request.
	defer fr.Close()
	var buf bytes.Buffer
	n, err := io.Copy(&buf, fr)
	if err != nil {
		// A partial read would make image.Decode fail confusingly; report it.
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	i, format, err := image.Decode(&buf)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	b := i.Bounds()
	// only do downscaling, otherwise just serve the original image
	if width < b.Dx() || height < b.Dy() {
		const huge = 2400
		// If it's gigantic, it's more efficient to downsample first
		// and then resize; resizing will smooth out the roughness.
		// (trusting the moustachio guys on that one).
		if b.Dx() > huge || b.Dy() > huge {
			w, h := width * 2, height * 2
			if b.Dx() > b.Dy() {
				w = b.Dx() * h / b.Dy()
			} else {
				h = b.Dy() * w / b.Dx()
			}
			i = resize.Resample(i, i.Bounds(), w, h)
			b = i.Bounds()
		}
		// conserve proportions. use the smallest of the two as the decisive one.
		if width > height {
			width = b.Dx() * height / b.Dy()
		} else {
			height = b.Dy() * width / b.Dx()
		}
		i = resize.Resize(i, b, width, height)
		// Encode as a new image
		buf.Reset()
		switch format {
		case "jpeg":
			err = jpeg.Encode(&buf, i, nil)
		default:
			err = png.Encode(&buf, i)
		}
		if err != nil {
			http.Error(rw, "Can't serve file: "+err.String(), 500)
			return
		}
	}
	ct := ""
	switch format {
	case "jpeg":
		ct = "image/jpeg"
	default:
		ct = "image/png"
	}
	rw.Header().Set("Content-Type", ct)
	size := buf.Len()
	rw.Header().Set("Content-Length", fmt.Sprintf("%d", size))
	n, err = io.Copy(rw, &buf)
	if err != nil {
		log.Printf("error serving thumbnail of file schema %s: %v", blobref, err)
		return
	}
	if n != int64(size) {
		log.Printf("error serving thumbnail of file schema %s: sent %d, expected size of %d",
			blobref, n, size)
		return
	}
}
|
package main
import (
"os"
"path/filepath"
"testing"
)
// TestSetRootImport verifies that getImportPath derives the expected
// root import path from the current working directory.
func TestSetRootImport(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
t.Errorf("Unexpected error: %s", err.Error())
}
s := "github.com/sparrc/gdm"
rootImport := getImportPath(wd)
if rootImport != s {
t.Errorf("Expected rootImport %s, got %s", s, rootImport)
}
}
// TestGetRepoRoot verifies getRepoRoot resolves this project's import
// path without error.
func TestGetRepoRoot(t *testing.T) {
s := "github.com/sparrc/gdm"
_, err := getRepoRoot(s)
if err != nil {
t.Errorf("Unexpected error: %s", err.Error())
}
}
// TestImportsFromFile parses the checked-in TestGodeps fixture and
// verifies both the number of restorable imports and each import's
// path and revision.
func TestImportsFromFile(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Errorf("Unexpected error: %s", err.Error())
	}
	filename := filepath.Join(wd, "test", "TestGodeps")
	imports := ImportsFromFile(filename)
	// The fixture lists 20 entries, but imports sharing a repo root are
	// collapsed to one, leaving 19 restorable imports.
	if len(imports) != 19 {
		t.Errorf("Expected %d imports, got %d", 19, len(imports))
	}
	tests := []struct {
		importpath string
		rev        string
	}{
		{"collectd.org/api", "9fc824c70f713ea0f058a07b49a4c563ef2a3b98"},
		// collectd.org/network is in the file but shares a "repo root"
		// with collectd.org/api, so it doesn't appear in the restore
		// import paths:
		// {"collectd.org/network", "9fc824c70f713ea0f058a07b49a4c563ef2a3b98"},
		{"github.com/BurntSushi/toml", "056c9bc7be7190eaa7715723883caffa5f8fa3e4"},
		{"github.com/bmizerany/pat", "b8a35001b773c267eb260a691f4e5499a3531600"},
		{"github.com/boltdb/bolt", "b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0"},
		{"github.com/davecgh/go-spew", "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"},
		{"github.com/dgryski/go-bits", "86c69b3c986f9d40065df5bd8f765796549eef2e"},
		{"github.com/dgryski/go-bitstream", "27cd5973303fde7d914860be1ea4b927a6be0c92"},
		{"github.com/gogo/protobuf", "e492fd34b12d0230755c45aa5fb1e1eea6a84aa9"},
		{"github.com/golang/snappy", "723cc1e459b8eea2dea4583200fd60757d40097a"},
		{"github.com/hashicorp/raft", "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"},
		{"github.com/hashicorp/raft-boltdb", "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"},
		{"github.com/influxdb/enterprise-client", "25665cba4f54fa822546c611c9414ac31aa10faa"},
		{"github.com/jwilder/encoding", "07d88d4f35eec497617bee0c7bfe651a796dae13"},
		{"github.com/kimor79/gollectd", "61d0deeb4ffcc167b2a1baa8efd72365692811bc"},
		{"github.com/paulbellamy/ratecounter", "5a11f585a31379765c190c033b6ad39956584447"},
		{"github.com/peterh/liner", "4d47685ab2fd2dbb46c66b831344d558bc4be5b9"},
		{"github.com/rakyll/statik", "274df120e9065bdd08eb1120e0375e3dc1ae8465"},
		{"golang.org/x/crypto", "7b85b097bf7527677d54d3220065e966a0e3b613"},
		{"gopkg.in/fatih/pool.v2", "cba550ebf9bce999a02e963296d4bc7a486cb715"},
	}
	for i, test := range tests {
		i := imports[i]
		if i.ImportPath != test.importpath {
			t.Errorf("Expected %s, actual %s", test.importpath, i.ImportPath)
		}
		if i.Rev != test.rev {
			t.Errorf("Expected %s, actual %s", test.rev, i.Rev)
		}
	}
}
Repo root import unit test fix
package main
import (
"os"
"path/filepath"
"testing"
)
// TestSetRootImport verifies that getImportPath derives the expected
// root import path from the current working directory.
func TestSetRootImport(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
t.Errorf("Unexpected error: %s", err.Error())
}
s := "github.com/sparrc/gdm"
rootImport := getImportPath(wd)
if rootImport != s {
t.Errorf("Expected rootImport %s, got %s", s, rootImport)
}
}
// TestGetRepoRoot verifies getRepoRoot resolves this project's import
// path without error.
func TestGetRepoRoot(t *testing.T) {
s := "github.com/sparrc/gdm"
_, err := getRepoRoot(s)
if err != nil {
t.Errorf("Unexpected error: %s", err.Error())
}
}
// TestImportsFromFile parses the checked-in TestGodeps fixture and
// verifies both the number of restorable imports and each import's
// path and revision.
func TestImportsFromFile(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Errorf("Unexpected error: %s", err.Error())
	}
	filename := filepath.Join(wd, "test", "TestGodeps")
	imports := ImportsFromFile(filename)
	if len(imports) != 19 {
		// Report the value actually expected (19), not the stale 20.
		t.Errorf("Expected %d imports, got %d", 19, len(imports))
	}
	tests := []struct {
		importpath string
		rev        string
	}{
		{"collectd.org/api", "9fc824c70f713ea0f058a07b49a4c563ef2a3b98"},
		// This import is in file but has the same "repo root" as collectd.org/api
		// so it shouldn't show up in the 'restore' import paths
		// {"collectd.org/network", "9fc824c70f713ea0f058a07b49a4c563ef2a3b98"},
		{"github.com/BurntSushi/toml", "056c9bc7be7190eaa7715723883caffa5f8fa3e4"},
		{"github.com/bmizerany/pat", "b8a35001b773c267eb260a691f4e5499a3531600"},
		{"github.com/boltdb/bolt", "b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0"},
		{"github.com/davecgh/go-spew", "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"},
		{"github.com/dgryski/go-bits", "86c69b3c986f9d40065df5bd8f765796549eef2e"},
		{"github.com/dgryski/go-bitstream", "27cd5973303fde7d914860be1ea4b927a6be0c92"},
		{"github.com/gogo/protobuf", "e492fd34b12d0230755c45aa5fb1e1eea6a84aa9"},
		{"github.com/golang/snappy", "723cc1e459b8eea2dea4583200fd60757d40097a"},
		{"github.com/hashicorp/raft", "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"},
		{"github.com/hashicorp/raft-boltdb", "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"},
		{"github.com/influxdb/enterprise-client", "25665cba4f54fa822546c611c9414ac31aa10faa"},
		{"github.com/jwilder/encoding", "07d88d4f35eec497617bee0c7bfe651a796dae13"},
		{"github.com/kimor79/gollectd", "61d0deeb4ffcc167b2a1baa8efd72365692811bc"},
		{"github.com/paulbellamy/ratecounter", "5a11f585a31379765c190c033b6ad39956584447"},
		{"github.com/peterh/liner", "4d47685ab2fd2dbb46c66b831344d558bc4be5b9"},
		{"github.com/rakyll/statik", "274df120e9065bdd08eb1120e0375e3dc1ae8465"},
		{"golang.org/x/crypto", "7b85b097bf7527677d54d3220065e966a0e3b613"},
		{"gopkg.in/fatih/pool.v2", "cba550ebf9bce999a02e963296d4bc7a486cb715"},
	}
	for i, test := range tests {
		i := imports[i]
		if i.ImportPath != test.importpath {
			t.Errorf("Expected %s, actual %s", test.importpath, i.ImportPath)
		}
		if i.Rev != test.rev {
			t.Errorf("Expected %s, actual %s", test.rev, i.Rev)
		}
	}
}
|
package plugins
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/Jeffail/gabs"
"github.com/Seklfreak/Robyul2/helpers"
"github.com/bwmarrin/discordgo"
)
const (
streamableApiBaseUrl = "https://api.streamable.com/%s"
)
// Streamable is the plugin that creates streamable.com videos from
// links or attachments.
type Streamable struct{}
// Commands returns the chat commands this plugin handles.
func (s *Streamable) Commands() []string {
return []string{
"streamable",
}
}
// Init performs one-time setup; nothing is needed for this plugin.
func (s *Streamable) Init(session *discordgo.Session) {
}
// Action handles "[p]streamable [<link>]" (or an attachment): it submits
// the source URL to the Streamable import API, polls the video status
// until processing finishes, and posts the resulting streamable URL.
func (s *Streamable) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) { // [p]streamable [<link>] or attachment
var err error
session.ChannelTyping(msg.ChannelID)
if len(content) <= 0 && len(msg.Attachments) <= 0 {
_, err := helpers.SendMessage(msg.ChannelID, helpers.GetText("bot.arguments.invalid"))
helpers.Relax(err)
return
}
// An attachment takes priority over message text as the source URL.
sourceUrl := content
if len(msg.Attachments) > 0 {
sourceUrl = msg.Attachments[0].URL
}
createStreamableEndpoint := fmt.Sprintf(streamableApiBaseUrl, fmt.Sprintf("import?url=%s", url.QueryEscape(sourceUrl)))
request, err := http.NewRequest("GET", createStreamableEndpoint, nil)
helpers.Relax(err)
request.Header.Add("user-agent", helpers.DEFAULT_UA)
request.SetBasicAuth(helpers.GetConfig().Path("streamable.username").Data().(string),
helpers.GetConfig().Path("streamable.password").Data().(string))
httpClient := &http.Client{
Timeout: time.Duration(10 * time.Second),
}
response, err := httpClient.Do(request)
helpers.Relax(err)
defer response.Body.Close()
buf := bytes.NewBuffer(nil)
_, err = io.Copy(buf, response.Body)
helpers.Relax(err)
jsonResult, err := gabs.ParseJSON(buf.Bytes())
// Status >= 3 means the import failed on Streamable's side.
if err != nil || jsonResult.ExistsP("status") == false || jsonResult.Path("status").Data().(float64) >= 3 {
_, err = helpers.SendMessage(msg.ChannelID,
fmt.Sprintf("<@%s> Something went wrong while creating your streamable. <:blobscream:317043778823389184>",
msg.Author.ID))
helpers.Relax(err)
return
}
helpers.SendMessage(msg.ChannelID, "Your streamable is processing, this may take a while. <:blobsleeping:317047101534109696>")
session.ChannelTyping(msg.ChannelID)
// NOTE(review): unchecked type assertion — panics if "shortcode" is
// missing or not a string; presumably Relax-style recovery is handled
// upstream, verify.
streamableShortcode := jsonResult.Path("shortcode").Data().(string)
streamableUrl := ""
CheckStreamableStatusLoop:
for {
statusStreamableEndpoint := fmt.Sprintf(streamableApiBaseUrl, fmt.Sprintf("videos/%s", streamableShortcode))
result, err := gabs.ParseJSON(helpers.NetGet(statusStreamableEndpoint))
if err != nil {
if strings.Contains(err.Error(), "Expected status 200; Got 429") {
_, err = helpers.SendMessage(msg.ChannelID,
fmt.Sprintf("<@%s> Too many requests, please try again later. <:blobscream:317043778823389184>",
msg.Author.ID))
helpers.Relax(err)
} else {
helpers.Relax(err)
}
}
// Streamable status codes: 0/1 = still processing, 2 = ready.
switch result.Path("status").Data().(float64) {
case 0:
case 1:
time.Sleep(5 * time.Second)
session.ChannelTyping(msg.ChannelID)
continue CheckStreamableStatusLoop
case 2:
streamableUrl = result.Path("url").Data().(string)
if !strings.Contains(streamableUrl, "://") {
streamableUrl = "https://" + streamableUrl
}
break CheckStreamableStatusLoop
default:
_, err = helpers.SendMessage(msg.ChannelID,
fmt.Sprintf("<@%s> Something went wrong while creating your streamable. <:blobscream:317043778823389184>",
msg.Author.ID))
helpers.Relax(err)
return
}
}
_, err = helpers.SendMessage(msg.ChannelID, fmt.Sprintf("<@%s> Your streamable is done: %s .", msg.Author.ID, streamableUrl))
helpers.Relax(err)
}
[streamable] Include source url in streamable title
package plugins
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/Jeffail/gabs"
"github.com/Seklfreak/Robyul2/helpers"
"github.com/bwmarrin/discordgo"
)
const (
streamableApiBaseUrl = "https://api.streamable.com/%s"
)
// Streamable is the plugin that creates streamable.com videos from
// links or attachments.
type Streamable struct{}
// Commands returns the chat commands this plugin handles.
func (s *Streamable) Commands() []string {
return []string{
"streamable",
}
}
// Init performs one-time setup; nothing is needed for this plugin.
func (s *Streamable) Init(session *discordgo.Session) {
}
// Action handles "[p]streamable [<link>]" (or an attachment): it submits
// the source URL to the Streamable import API (reusing the URL as the
// video title), polls the video status until processing finishes, and
// posts the resulting streamable URL.
func (s *Streamable) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) { // [p]streamable [<link>] or attachment
var err error
session.ChannelTyping(msg.ChannelID)
if len(content) <= 0 && len(msg.Attachments) <= 0 {
_, err := helpers.SendMessage(msg.ChannelID, helpers.GetText("bot.arguments.invalid"))
helpers.Relax(err)
return
}
// An attachment takes priority over message text as the source URL.
sourceUrl := content
if len(msg.Attachments) > 0 {
sourceUrl = msg.Attachments[0].URL
}
createStreamableEndpoint := fmt.Sprintf(streamableApiBaseUrl, fmt.Sprintf("import?url=%s&title=%s", url.QueryEscape(sourceUrl), url.QueryEscape(sourceUrl)))
request, err := http.NewRequest("GET", createStreamableEndpoint, nil)
helpers.Relax(err)
request.Header.Add("user-agent", helpers.DEFAULT_UA)
request.SetBasicAuth(helpers.GetConfig().Path("streamable.username").Data().(string),
helpers.GetConfig().Path("streamable.password").Data().(string))
httpClient := &http.Client{
Timeout: time.Duration(10 * time.Second),
}
response, err := httpClient.Do(request)
helpers.Relax(err)
defer response.Body.Close()
buf := bytes.NewBuffer(nil)
_, err = io.Copy(buf, response.Body)
helpers.Relax(err)
jsonResult, err := gabs.ParseJSON(buf.Bytes())
// Status >= 3 means the import failed on Streamable's side.
if err != nil || jsonResult.ExistsP("status") == false || jsonResult.Path("status").Data().(float64) >= 3 {
_, err = helpers.SendMessage(msg.ChannelID,
fmt.Sprintf("<@%s> Something went wrong while creating your streamable. <:blobscream:317043778823389184>",
msg.Author.ID))
helpers.Relax(err)
return
}
helpers.SendMessage(msg.ChannelID, "Your streamable is processing, this may take a while. <:blobsleeping:317047101534109696>")
session.ChannelTyping(msg.ChannelID)
// NOTE(review): unchecked type assertion — panics if "shortcode" is
// missing or not a string; presumably Relax-style recovery is handled
// upstream, verify.
streamableShortcode := jsonResult.Path("shortcode").Data().(string)
streamableUrl := ""
CheckStreamableStatusLoop:
for {
statusStreamableEndpoint := fmt.Sprintf(streamableApiBaseUrl, fmt.Sprintf("videos/%s", streamableShortcode))
result, err := gabs.ParseJSON(helpers.NetGet(statusStreamableEndpoint))
if err != nil {
if strings.Contains(err.Error(), "Expected status 200; Got 429") {
_, err = helpers.SendMessage(msg.ChannelID,
fmt.Sprintf("<@%s> Too many requests, please try again later. <:blobscream:317043778823389184>",
msg.Author.ID))
helpers.Relax(err)
} else {
helpers.Relax(err)
}
}
// Streamable status codes: 0/1 = still processing, 2 = ready.
switch result.Path("status").Data().(float64) {
case 0:
case 1:
time.Sleep(5 * time.Second)
session.ChannelTyping(msg.ChannelID)
continue CheckStreamableStatusLoop
case 2:
streamableUrl = result.Path("url").Data().(string)
if !strings.Contains(streamableUrl, "://") {
streamableUrl = "https://" + streamableUrl
}
break CheckStreamableStatusLoop
default:
_, err = helpers.SendMessage(msg.ChannelID,
fmt.Sprintf("<@%s> Something went wrong while creating your streamable. <:blobscream:317043778823389184>",
msg.Author.ID))
helpers.Relax(err)
return
}
}
_, err = helpers.SendMessage(msg.ChannelID, fmt.Sprintf("<@%s> Your streamable is done: %s .", msg.Author.ID, streamableUrl))
helpers.Relax(err)
}
|
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Andrei Matei (andreimatei1@gmail.com)
package sql_test
import (
"bytes"
gosql "database/sql"
"fmt"
"sync"
"testing"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/server"
"github.com/cockroachdb/cockroach/sql"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util/caller"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/uuid"
"github.com/cockroachdb/pq"
)
// failureRecord remembers the error injected for a value and the txn
// that was running when it was injected.
type failureRecord struct {
err error
txn *roachpb.Transaction
}
// filterVals holds the error-injection bookkeeping shared (under the
// mutex) between the command filter and the test goroutine.
type filterVals struct {
sync.Mutex
// key -> number of times an retriable error will be injected when that key
// is written.
restartCounts map[string]int
// key -> number of times a TransactionAborted error will be injected when
// that key is written. Note that injecting this is pretty funky: it can only
// be done on the first write of a txn, otherwise the previously written
// intents will linger on.
abortCounts map[string]int
// Map from a values to the number of times we'll inject a
// TransactionRetryError in the transaction writing that value.
endTxnRestartCounts map[string]int
// Keys for which we injected an error.
failedValues map[string]failureRecord
// Map in which we're accumulated the ids of the txns that we need to inject
// errors into because of endTxnRestartCounts.
txnsToFail map[uuid.UUID]bool
}
// createFilterVals builds a filterVals with the given restart/abort
// counters and empty tracking maps.
func createFilterVals(
restartCounts map[string]int,
abortCounts map[string]int) *filterVals {
return &filterVals{
restartCounts: restartCounts,
abortCounts: abortCounts,
failedValues: map[string]failureRecord{},
txnsToFail: map[uuid.UUID]bool{},
}
}
// checkCorrectTxn checks that the current txn is the correct one, according to
// the way the previous txn that tried to write value failed: an aborted
// txn must have been replaced by a new txn, whereas a restarted txn must
// still be the same txn.
func checkCorrectTxn(value string, magicVals *filterVals, txn *roachpb.Transaction) {
failureRec, found := magicVals.failedValues[value]
if !found {
return
}
switch failureRec.err.(type) {
case *roachpb.TransactionAbortedError:
// The previous txn should have been aborted, so check that we're running
// in a new one.
if failureRec.txn.Equal(txn) {
panic(fmt.Sprintf("new transaction for value \"%s\" is the same "+
"as the old one", value))
}
default:
// The previous txn should have been restarted, so we should be running in
// the same one.
if !failureRec.txn.Equal(txn) {
// panic and not t.Fatal because this runs on a different goroutine
panic(fmt.Sprintf("new transaction for value \"%s\" (%s) "+
"is not the same as the old one (%s)", value,
txn, failureRec.txn))
}
}
// Don't check this value in subsequent transactions.
delete(magicVals.failedValues, value)
}
// injectErrors inspects a request and, according to magicVals' counters,
// returns an artificial retriable or aborted error for matching
// ConditionalPut writes, or marks the txn for a later EndTransaction
// retry failure. Returns nil when nothing is injected.
func injectErrors(
req roachpb.Request,
hdr roachpb.Header,
magicVals *filterVals,
) error {
magicVals.Lock()
defer magicVals.Unlock()
switch req := req.(type) {
case *roachpb.ConditionalPutRequest:
for key, count := range magicVals.restartCounts {
checkCorrectTxn(string(req.Value.RawBytes), magicVals, hdr.Txn)
if count > 0 && bytes.Contains(req.Value.RawBytes, []byte(key)) {
magicVals.restartCounts[key]--
err := roachpb.NewReadWithinUncertaintyIntervalError(
hlc.ZeroTimestamp, hlc.ZeroTimestamp)
magicVals.failedValues[string(req.Value.RawBytes)] =
failureRecord{err, hdr.Txn}
return err
}
}
for key, count := range magicVals.abortCounts {
checkCorrectTxn(string(req.Value.RawBytes), magicVals, hdr.Txn)
if count > 0 && bytes.Contains(req.Value.RawBytes, []byte(key)) {
magicVals.abortCounts[key]--
err := roachpb.NewTransactionAbortedError()
magicVals.failedValues[string(req.Value.RawBytes)] =
failureRecord{err, hdr.Txn}
return err
}
}
// If we're writing a value that's marked for an EndTransaction failure,
// keep track of the txn id so we can fail it later on.
for key, count := range magicVals.endTxnRestartCounts {
if count > 0 && bytes.Contains(req.Value.RawBytes, []byte(key)) {
txnID := *hdr.Txn.TxnMeta.ID
if _, found := magicVals.txnsToFail[txnID]; found {
continue
}
magicVals.endTxnRestartCounts[key]--
magicVals.txnsToFail[txnID] = true
}
}
return nil
case *roachpb.EndTransactionRequest:
txnID := *hdr.Txn.TxnMeta.ID
if !magicVals.txnsToFail[txnID] {
return nil
}
delete(magicVals.txnsToFail, txnID)
// Note that we can't return TransactionAborted errors, although those are
// more representative for the errors that EndTransaction might encounter,
// because returning those would result in the txn's intents being left
// around.
return roachpb.NewTransactionRetryError()
default:
return nil
}
}
// checkRestarts checks that there are no errors left to inject: every counter
// in magicVals must have reached zero and no txns may remain marked for
// failure. Fails the test fatally otherwise.
func checkRestarts(t *testing.T, magicVals *filterVals) {
	magicVals.Lock()
	defer magicVals.Unlock()
	for key, count := range magicVals.restartCounts {
		if count != 0 {
			// Report the caller's file:line since this helper is shared by tests.
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be retried %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.abortCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.endTxnRestartCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: txn writing \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	if len(magicVals.txnsToFail) > 0 {
		file, line, _ := caller.Lookup(1)
		t.Errorf("%s:%d: txns still to be failed: %v", file, line, magicVals.txnsToFail)
	}
	// Escalate any accumulated Errorf calls to a fatal failure.
	if t.Failed() {
		t.Fatalf("checking error injection failed")
	}
}
// TestTxnRestart tests the logic in the sql executor for automatically retrying
// txns in case of retriable errors.
func TestTxnRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	// Disable one phase commits because they cannot be restarted.
	ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs).DisableOnePhaseCommits = true
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	// Make sure all the commands we send in this test are sent over the same connection.
	// This is a bit of a hack; in Go you're not supposed to have connection state
	// outside of using a db.Tx. But we can't use a db.Tx here, because we want
	// to control the batching of BEGIN/COMMIT statements.
	// This SetMaxOpenConns is pretty shady, it doesn't guarantee that you'll be using
	// the *same* one connection across calls. A proper solution would be to use a
	// lib/pq connection directly. As of Feb 2016, there's code in cli/sql_util.go to
	// do that.
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT, t DECIMAL);
`); err != nil {
		t.Fatal(err)
	}
	// Set up error injection that causes retries.
	// Every value below gets two injected retryable errors; "boulanger" also
	// gets two injected aborts, and four of the values additionally fail at
	// EndTransaction time.
	magicVals := createFilterVals(nil, nil)
	magicVals.restartCounts = map[string]int{
		"boulanger": 2,
		"dromedary": 2,
		"fajita":    2,
		"hooly":     2,
		"josephine": 2,
		"laureal":   2,
	}
	magicVals.abortCounts = map[string]int{
		"boulanger": 2,
	}
	magicVals.endTxnRestartCounts = map[string]int{
		"boulanger": 2,
		"dromedary": 2,
		"fajita":    2,
		"hooly":     2,
	}
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)
	// Test that implicit txns - txns for which we see all the statements and prefixes
	// of txns (statements batched together with the BEGIN stmt) - are retried.
	// We also exercise the SQL cluster logical timestamp in here, because
	// this must be properly propagated across retries.
	if _, err := sqlDB.Exec(`
INSERT INTO t.test (k, v, t) VALUES ('a', 'boulanger', cluster_logical_timestamp());
BEGIN;
INSERT INTO t.test (k, v, t) VALUES ('c', 'dromedary', cluster_logical_timestamp());
INSERT INTO t.test (k, v, t) VALUES ('e', 'fajita', cluster_logical_timestamp());
END;
INSERT INTO t.test (k, v, t) VALUES ('g', 'hooly', cluster_logical_timestamp());
BEGIN;
INSERT INTO t.test (k, v, t) VALUES ('i', 'josephine', cluster_logical_timestamp());
INSERT INTO t.test (k, v, t) VALUES ('k', 'laureal', cluster_logical_timestamp());
`); err != nil {
		t.Fatal(err)
	}
	cleanupFilter()
	// All injected errors must have been consumed by automatic retries.
	checkRestarts(t, magicVals)
	// Commit the txn left open by the trailing BEGIN above.
	if _, err := sqlDB.Exec("END;"); err != nil {
		t.Fatal(err)
	}
	// Check that the txns succeeded by reading the rows.
	var count int
	if err := sqlDB.QueryRow("SELECT COUNT(*) FROM t.test").Scan(&count); err != nil {
		t.Fatal(err)
	}
	if count != 6 {
		t.Fatalf("Expected 6 rows, got %d", count)
	}
	// Now test that we don't retry what we shouldn't: insert an error into a txn
	// we can't automatically retry (because it spans requests).
	magicVals = createFilterVals(nil, nil)
	magicVals.restartCounts = map[string]int{
		"hooly": 2,
	}
	cleanupFilter = cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)
	defer cleanupFilter()
	// Start a txn.
	if _, err := sqlDB.Exec(`
DELETE FROM t.test WHERE true;
BEGIN;
`); err != nil {
		t.Fatal(err)
	}
	// Continue the txn in a new request, which is not retriable.
	_, err := sqlDB.Exec("INSERT INTO t.test (k, v, t) VALUES ('g', 'hooly', cluster_logical_timestamp())")
	if !testutils.IsError(
		err, "encountered previous write with future timestamp") {
		t.Errorf("didn't get expected injected error. Got: %s", err)
	}
}
// rollbackStrategy is the type of statement which a client can use to
// rollback aborted txns from retryable errors. We accept two statements
// for rolling back to the cockroach_restart savepoint. See
// *Executor.execStmtInAbortedTxn for more about transaction retries.
type rollbackStrategy int

const (
	rollbackToSavepoint rollbackStrategy = iota
	declareSavepoint
)

// SQLCommand returns the SQL statement a client issues for this strategy.
func (rs rollbackStrategy) SQLCommand() string {
	if rs == rollbackToSavepoint {
		return "ROLLBACK TO SAVEPOINT cockroach_restart"
	}
	if rs == declareSavepoint {
		return "SAVEPOINT cockroach_restart"
	}
	panic("unreachable")
}
// exec takes a closure and executes it repeatedly as long as it says it needs
// to be retried. The function also takes a rollback strategy, which specifies
// the statement which the client will use to rollback aborted txns from retryable
// errors.
// This function needs to be called from tests that set
// server.Context.TestingKnobs.ExecutorTestingKnobs.FixTxnPriority = true
func exec(t *testing.T, sqlDB *gosql.DB, rs rollbackStrategy, fn func(*gosql.Tx) bool) {
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// LOW priority makes this txn easy to push (and thus abort) by the
	// HIGH-priority txn that abortTxn runs.
	if _, err := tx.Exec(
		"SAVEPOINT cockroach_restart; SET TRANSACTION PRIORITY LOW;"); err != nil {
		t.Fatal(err)
	}
	// fn returns true while it wants another attempt; between attempts, roll
	// back to the savepoint using the requested strategy's statement.
	for fn(tx) {
		if _, err := tx.Exec(rs.SQLCommand()); err != nil {
			t.Fatal(err)
		}
	}
	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}
}
// isRetryableErr returns whether the given error is a PG retryable error
// (PostgreSQL error code 40001).
func isRetryableErr(err error) bool {
	if pqErr, ok := err.(*pq.Error); ok {
		return pqErr.Code == "40001"
	}
	return false
}
// runTestTxn is the txn closure used with exec(). Returns true on retriable
// errors (i.e. when the caller should retry the txn).
// While magicVals still has "boulanger" restarts/aborts left to inject, the
// INSERT is expected to fail with expectedErr. Once those are exhausted, the
// writes should succeed; if *injectReleaseError is set, the txn is
// additionally aborted externally (once) so that RELEASE SAVEPOINT fails.
func runTestTxn(t *testing.T, magicVals *filterVals, expectedErr string,
	injectReleaseError *bool, sqlDB *gosql.DB, tx *gosql.Tx) bool {
	retriesNeeded :=
		(magicVals.restartCounts["boulanger"] + magicVals.abortCounts["boulanger"]) > 0
	var err error
	if retriesNeeded {
		_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (1, 'boulanger')")
		if !testutils.IsError(err, expectedErr) {
			t.Fatalf("expected to fail here. err: %s", err)
		}
		return isRetryableErr(err)
	}
	// Now the INSERT should succeed.
	_, err = tx.Exec("DELETE FROM t.test WHERE true; INSERT INTO t.test (k, v) VALUES (0, 'sentinel');")
	if err != nil {
		t.Fatal(err)
	}
	retriesNeeded = *injectReleaseError
	if retriesNeeded {
		// Only inject the RELEASE-time abort once.
		*injectReleaseError = false
		abortTxn(t, sqlDB, 0)
	}
	_, err = tx.Exec("RELEASE SAVEPOINT cockroach_restart")
	if retriesNeeded {
		if err == nil {
			t.Fatal("expected RELEASE to fail")
		}
	} else {
		if err != nil {
			t.Fatal(err)
		}
	}
	return isRetryableErr(err)
}
// abortTxn writes to a key and as a side effect aborts a txn that had an intent
// on that key.
// This cannot be done as an injected error, since we want the pusher to clean
// up the intents of the pushee.
// This function needs to be called from tests that set
// server.Context.TestingKnobs.ExecutorTestingKnobs.FixTxnPriority = true
func abortTxn(t *testing.T, sqlDB *gosql.DB, key int) {
	// Idiomatic short declaration instead of the original's separate
	// var err / var tx declarations followed by assignment.
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// HIGH priority lets this txn push (and thus abort) the LOW-priority txn
	// holding an intent on the key.
	if _, err := tx.Exec("SET TRANSACTION PRIORITY HIGH"); err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("DELETE FROM t.test WHERE k = $1", key); err != nil {
		t.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}
}
// TestTxnUserRestart tests user-directed txn restarts.
// The test will inject and otherwise create retriable errors of various kinds
// and checks that we still manage to run a txn despite them.
func TestTxnUserRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	// FixTxnPriority is required by exec() and abortTxn(), which rely on
	// explicit LOW/HIGH transaction priorities.
	ctx.TestingKnobs.SQLExecutor = &sql.ExecutorTestingKnobs{FixTxnPriority: true}
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	// Set up error injection that causes retries.
	testCases := []struct {
		magicVals   *filterVals
		expectedErr string
	}{
		{
			magicVals: createFilterVals(
				map[string]int{"boulanger": 2}, // restartCounts
				nil),
			expectedErr: ".*encountered previous write with future timestamp.*",
		},
		{
			magicVals: createFilterVals(
				nil,
				map[string]int{"boulanger": 2}), // abortCounts
			expectedErr: ".*txn aborted.*",
		},
	}
	for _, tc := range testCases {
		for _, rs := range []rollbackStrategy{rollbackToSavepoint, declareSavepoint} {
			cleanupFilter := cmdFilters.AppendFilter(
				func(args storagebase.FilterArgs) *roachpb.Error {
					if err := injectErrors(args.Req, args.Hdr, tc.magicVals); err != nil {
						return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
					}
					return nil
				}, false)
			// Also inject an error at RELEASE time, besides the error injected by magicVals.
			injectReleaseError := true
			commitCount := server.TestServer.MustGetSQLCounter("txn.commit.count")
			// This is the magic. Run the txn closure until all the retries are exhausted.
			exec(t, sqlDB, rs, func(tx *gosql.Tx) bool {
				return runTestTxn(t, tc.magicVals, tc.expectedErr, &injectReleaseError, sqlDB, tx)
			})
			checkRestarts(t, tc.magicVals)
			// Check that we only wrote the sentinel row.
			rows, err := sqlDB.Query("SELECT * FROM t.test")
			if err != nil {
				t.Fatal(err)
			}
			for rows.Next() {
				var k int
				var v string
				err = rows.Scan(&k, &v)
				if err != nil {
					t.Fatal(err)
				}
				if k != 0 || v != "sentinel" {
					t.Fatalf("didn't find expected row: %d %s", k, v)
				}
			}
			// Close the result set as soon as we're done iterating, before
			// issuing further statements (the original only closed it after
			// the DELETE below, needlessly keeping the result open).
			rows.Close()
			// Check that the commit counter was incremented. It could have been
			// incremented by more than 1 because of the transactions we use to force
			// aborts, plus who knows what else the server is doing in the background.
			checkCounterGE(t, server, "txn.commit.count", commitCount+1)
			// Clean up the table for the next test iteration.
			if _, err := sqlDB.Exec("DELETE FROM t.test WHERE true"); err != nil {
				t.Fatal(err)
			}
			cleanupFilter()
		}
	}
}
// TestCommitWaitState tests that random commands sent while the txn is in the
// COMMIT_WAIT state (i.e. after a successful RELEASE) return a particular
// error.
func TestCommitWaitState(t *testing.T) {
	defer leaktest.AfterTest(t)()
	server, sqlDB, _ := setup(t)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// RELEASE commits the txn, moving it into COMMIT_WAIT.
	if _, err := tx.Exec(
		"SAVEPOINT cockroach_restart; RELEASE cockroach_restart;"); err != nil {
		t.Fatal(err)
	}
	// Any further statement must be rejected with a "committed" error.
	_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'sentinel');")
	if !testutils.IsError(err, "current transaction is committed") {
		t.Fatal(err)
	}
	// Rollback should respond with a COMMIT command tag.
	err = tx.Rollback()
	if !testutils.IsError(err, "unexpected command tag COMMIT") {
		t.Fatal(err)
	}
}
// TestErrorOnCommitResultsInRollback tests that if there's an error on COMMIT
// that needs to be reported to the user the txn will be rolled back. As
// opposed to an error on a COMMIT in an auto-retry txn, where we retry the
// txn (not tested here).
func TestErrorOnCommitResultsInRollback(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := server.MakeTestContext()
	// FixTxnPriority is required by abortTxn(), which relies on explicit
	// LOW/HIGH transaction priorities.
	ctx.TestingKnobs.SQLExecutor = &sql.ExecutorTestingKnobs{FixTxnPriority: true}
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart; SET TRANSACTION PRIORITY LOW;"); err != nil {
		t.Fatal(err)
	}
	if _, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'sentinel');"); err != nil {
		t.Fatal(err)
	}
	// Abort the txn from the outside, so that the COMMIT below must fail.
	abortTxn(t, sqlDB, 0)
	if err = tx.Commit(); err == nil {
		// The original message formatted the (necessarily nil) error as
		// "unexpected error: <nil>"; say what actually went wrong instead.
		t.Fatal("expected commit to fail, but it succeeded")
	}
	// Check that there's no error reading and we don't see any rows.
	var rows *gosql.Rows
	if rows, err = sqlDB.Query("SELECT * FROM t.test"); err != nil {
		t.Fatal(err)
	}
	if rows.Next() {
		var k int
		var v string
		_ = rows.Scan(&k, &v)
		t.Fatalf("found unexpected row: %d %s", k, v)
	}
	rows.Close()
}
// TestCommitFinalizesTxnOnError tests that a COMMIT getting an error,
// retryable or not, leaves the txn finalized and not in Aborted/RestartWait
// (i.e. COMMIT, like ROLLBACK, is always final).
func TestCommitFinalizesTxnOnError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	// We need to do everything on one connection as we'll want to observe the
	// connection state after a COMMIT.
	sqlDB.SetMaxOpenConns(1)
	// Set up error injection that causes retries.
	magicVals := createFilterVals(nil, nil)
	magicVals.endTxnRestartCounts = map[string]int{
		"boulanger": 1000, // restart many times, for all the tests below
	}
	// AppendFilter returns a cleanup func, which is deferred directly.
	defer cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)()
	// We're going to test both errors that would leave the transaction in the
	// RestartWait state and errors that would leave the transaction in Aborted,
	// if they were to happen on any other statement than COMMIT.
	// We do that by always injecting a retryable error at COMMIT, but once in a
	// txn that had a "retry intent" (SAVEPOINT cockroach_restart), and once in a
	// txn without it.
	testCases := []struct {
		retryIntent bool
	}{
		{false},
		{true},
	}
	for _, tc := range testCases {
		if _, err := sqlDB.Exec("BEGIN;"); err != nil {
			t.Fatal(err)
		}
		if tc.retryIntent {
			if _, err := sqlDB.Exec("SAVEPOINT cockroach_restart;"); err != nil {
				t.Fatal(err)
			}
		}
		if _, err := sqlDB.Exec("INSERT INTO t.test (k, v) VALUES (0, 'boulanger');"); err != nil {
			t.Fatal(err)
		}
		// The injected retryable error surfaces at COMMIT time.
		if _, err := sqlDB.Exec("COMMIT;"); !testutils.IsError(err, "pq: restart transaction") {
			t.Fatalf("unexpected error: %v", err)
		}
		// Check that we can start another txn on the (one and only) connection.
		if _, err := sqlDB.Exec("BEGIN;END;"); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRollbackToSavepointStatement tests that issuing a RESTART outside of a
// txn produces the proper error.
func TestRollbackToSavepointStatement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	server, sqlDB, _ := setup(t)
	defer cleanup(server, sqlDB)
	// ROLLBACK TO SAVEPOINT without a transaction
	_, err := sqlDB.Exec("ROLLBACK TO SAVEPOINT cockroach_restart")
	if !testutils.IsError(err, "the transaction is not in a retriable state") {
		t.Fatal("expected to fail here. err: ", err)
	}
	// ROLLBACK TO SAVEPOINT with a wrong name
	_, err = sqlDB.Exec("ROLLBACK TO SAVEPOINT foo")
	if !testutils.IsError(err, "SAVEPOINT not supported except for COCKROACH_RESTART") {
		t.Fatal("expected to fail here. err: ", err)
	}
	// ROLLBACK TO SAVEPOINT in a non-retriable transaction
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	if _, err = tx.Exec("BOGUS SQL STATEMENT"); err == nil {
		// The original formatted the (necessarily nil) error into this
		// message; this branch means the statement unexpectedly succeeded.
		t.Fatal("expected BOGUS SQL STATEMENT to fail, but it succeeded")
	}
	// The txn hit a non-retriable error, so the savepoint cannot be used.
	_, err = tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart")
	if !testutils.IsError(err,
		"SAVEPOINT COCKROACH_RESTART has not been used or a non-retriable error was encountered") {
		t.Fatal("expected to fail here. err: ", err)
	}
}
// TestNonRetriableError checks that a non-retriable error (e.g. duplicate key)
// doesn't leave the txn in a restartable state.
func TestNonRetriableError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	server, sqlDB, _ := setup(t)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	var tx *gosql.Tx
	var err error
	if tx, err = sqlDB.Begin(); err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	if _, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'test')"); err != nil {
		t.Fatal(err)
	}
	// Inserting the same key again violates the primary key constraint - a
	// non-retriable error.
	_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'test');")
	if !testutils.IsError(err, "duplicate key value") {
		t.Errorf("expected duplicate key error. Got: %s", err)
	}
	// The txn is now aborted (not restartable), so ROLLBACK TO SAVEPOINT must
	// be refused.
	if _, err := tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart"); !testutils.IsError(
		err, "current transaction is aborted, commands ignored until end of "+
			"transaction block; SAVEPOINT COCKROACH_RESTART has not been used or a "+
			"non-retriable error was encountered.") {
		t.Fatal(err)
	}
	// A full ROLLBACK still works from the aborted state.
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
}
// TestRollbackInRestartWait ensures that a ROLLBACK while the txn is in the
// RestartWait state works.
func TestRollbackInRestartWait(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	// Set up error injection that causes retries.
	magicVals := createFilterVals(nil, nil)
	magicVals.endTxnRestartCounts = map[string]int{
		"boulanger": 1,
	}
	// AppendFilter returns a cleanup func, which is deferred directly.
	defer cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)()
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec(
		"INSERT INTO t.test (k, v) VALUES ('g', 'boulanger')"); err != nil {
		t.Fatal(err)
	}
	// The injected retryable error surfaces at RELEASE time, putting the txn
	// into RestartWait.
	if _, err := tx.Exec("RELEASE SAVEPOINT cockroach_restart"); err == nil {
		t.Fatal("expected RELEASE to fail")
	}
	// ROLLBACK must succeed from the RestartWait state.
	if err := tx.Rollback(); err != nil {
		t.Fatal(err)
	}
}
// TestNonRetryableError verifies that a non-retryable error is propagated to the client.
func TestNonRetryableError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	testKey := []byte("test_key")
	hitError := false
	// Inject a plain (non-retryable) error on any Scan touching the test key.
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if req, ok := args.Req.(*roachpb.ScanRequest); ok {
				if bytes.Contains(req.Key, testKey) {
					hitError = true
					return roachpb.NewErrorWithTxn(fmt.Errorf("testError"), args.Hdr.Txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()
	// We need to do everything on one connection as we'll want to observe the
	// connection state after a COMMIT.
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT);
INSERT INTO t.test (k, v) VALUES ('test_key', 'test_val');
SELECT * from t.test WHERE k = 'test_key';
`); !testutils.IsError(err, "pq: testError") {
		t.Errorf("unexpected error %s", err)
	}
	if !hitError {
		t.Errorf("expected to hit error, but it didn't happen")
	}
}
// TestNonRetryableErrorFromCommit verifies that a non-retryable error from the
// execution of EndTransactionRequests is propagated to the client.
func TestNonRetryableErrorFromCommit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	hitError := false
	// Inject a plain error when the EndTransaction for the descriptor ID
	// generator key runs (which CREATE DATABASE exercises).
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if req, ok := args.Req.(*roachpb.EndTransactionRequest); ok {
				if bytes.Contains(req.Key, []byte(keys.DescIDGenerator)) {
					hitError = true
					return roachpb.NewErrorWithTxn(fmt.Errorf("testError"), args.Hdr.Txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()
	if _, err := sqlDB.Exec("CREATE DATABASE t;"); !testutils.IsError(err, "pq: testError") {
		t.Errorf("unexpected error %s", err)
	}
	if !hitError {
		t.Errorf("expected to hit error, but it didn't happen")
	}
}
// TestTxnDeadline verifies that a read-only transaction that triggers a
// deadline-exceeded error finishes without causing an Executor error. In
// particular, this test case creates a read-only txn that elides
// EndTransactionRequest and makes sure a deadline-exceeded error causes a
// retryable error.
//
// This test triggers the above scenario by making ReadWithinUncertaintyIntervalError advance
// the clock, so that the transaction timestamp exceeds the deadline of the EndTransactionRequest.
func TestTxnDeadline(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var cmdFilters CommandFilters
	cmdFilters.AppendFilter(checkEndTransactionTrigger, true)
	restartDone := false
	testKey := []byte("test_key")
	testingKnobs := &storage.StoreTestingKnobs{
		TestingCommandFilter: cmdFilters.runFilters,
		ClockBeforeSend: func(c *hlc.Clock, ba roachpb.BatchRequest) {
			if restartDone {
				return
			}
			// Hack to advance the transaction timestamp on a transaction restart.
			for _, union := range ba.Requests {
				if req, ok := union.GetInner().(*roachpb.ScanRequest); ok {
					if bytes.Contains(req.Key, testKey) {
						now := c.Now()
						// Jump well past the lease duration so the deadline of
						// the (elided) EndTransaction is exceeded.
						now.WallTime += int64(5 * sql.LeaseDuration)
						c.Update(now)
						break
					}
				}
			}
		},
	}
	ctx := server.MakeTestContext()
	ctx.TestingKnobs.Store = testingKnobs
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if restartDone {
				return nil
			}
			if req, ok := args.Req.(*roachpb.ScanRequest); ok {
				if bytes.Contains(req.Key, testKey) {
					restartDone = true
					// Return ReadWithinUncertaintyIntervalError to update the transaction timestamp on retry.
					txn := args.Hdr.Txn
					txn.ResetObservedTimestamps()
					now := server.Clock().Now()
					txn.UpdateObservedTimestamp(server.Gossip().GetNodeID(), now)
					return roachpb.NewErrorWithTxn(roachpb.NewReadWithinUncertaintyIntervalError(now, now), txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()
	// Use a large max offset to avoid rejecting a transaction whose timestamp is in
	// future (as we will advance the transaction timestamp with ReadWithinUncertaintyIntervalError).
	server.Clock().SetMaxOffset(sql.LeaseDuration * 10)
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT);
INSERT INTO t.test (k, v) VALUES ('test_key', 'test_val');
`); err != nil {
		t.Fatal(err)
	}
	// Acquire the lease and enable the auto-retry. The first read attempt will trigger ReadWithinUncertaintyIntervalError
	// and advance the transaction timestamp. The second read attempt will succeed, but the (elided) EndTransactionRequest
	// hits a deadline-exceeded error.
	if _, err := sqlDB.Exec(`
SELECT * from t.test WHERE k = 'test_key';
`); err != nil {
		t.Fatal(err)
	}
	if !restartDone {
		t.Errorf("expected restart, but it didn't happen")
	}
}
sql: skip flaky test
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Andrei Matei (andreimatei1@gmail.com)
package sql_test
import (
"bytes"
gosql "database/sql"
"fmt"
"sync"
"testing"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/server"
"github.com/cockroachdb/cockroach/sql"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util/caller"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/uuid"
"github.com/cockroachdb/pq"
)
// failureRecord remembers an injected error together with the txn that was
// running when it was injected, so retries can be validated against it.
type failureRecord struct {
	err error
	txn *roachpb.Transaction
}
// filterVals holds the error-injection bookkeeping shared between the command
// filter and the test goroutine; the embedded mutex guards all fields.
type filterVals struct {
	sync.Mutex
	// key -> number of times a retriable error will be injected when that key
	// is written.
	restartCounts map[string]int
	// key -> number of times a TransactionAborted error will be injected when
	// that key is written. Note that injecting this is pretty funky: it can only
	// be done on the first write of a txn, otherwise the previously written
	// intents will linger on.
	abortCounts map[string]int
	// Map from a value to the number of times we'll inject a
	// TransactionRetryError in the transaction writing that value.
	endTxnRestartCounts map[string]int
	// Keys for which we injected an error.
	failedValues map[string]failureRecord
	// Map in which we've accumulated the ids of the txns that we need to inject
	// errors into because of endTxnRestartCounts.
	txnsToFail map[uuid.UUID]bool
}
// createFilterVals builds a filterVals from the given restart and abort
// counts, with empty bookkeeping maps.
func createFilterVals(
	restartCounts map[string]int,
	abortCounts map[string]int) *filterVals {
	vals := filterVals{
		restartCounts: restartCounts,
		abortCounts:   abortCounts,
	}
	vals.failedValues = make(map[string]failureRecord)
	vals.txnsToFail = make(map[uuid.UUID]bool)
	return &vals
}
// checkCorrectTxn checks that the current txn is the correct one, according to
// the way the previous txn that tried to write value failed.
// On mismatch it panics rather than calling t.Fatal, because it runs on a
// server goroutine, not the test's goroutine.
func checkCorrectTxn(value string, magicVals *filterVals, txn *roachpb.Transaction) {
	failureRec, found := magicVals.failedValues[value]
	if !found {
		// No error was previously injected for this value; nothing to verify.
		return
	}
	switch failureRec.err.(type) {
	case *roachpb.TransactionAbortedError:
		// The previous txn should have been aborted, so check that we're running
		// in a new one.
		if failureRec.txn.Equal(txn) {
			panic(fmt.Sprintf("new transaction for value \"%s\" is the same "+
				"as the old one", value))
		}
	default:
		// The previous txn should have been restarted, so we should be running in
		// the same one.
		if !failureRec.txn.Equal(txn) {
			// panic and not t.Fatal because this runs on a different goroutine
			panic(fmt.Sprintf("new transaction for value \"%s\" (%s) "+
				"is not the same as the old one (%s)", value,
				txn, failureRec.txn))
		}
	}
	// Don't check this value in subsequent transactions.
	delete(magicVals.failedValues, value)
}
// injectErrors inspects a write request and decides, based on the counters in
// magicVals, whether to return an injected error for it.
// ConditionalPut requests writing a "magic" value get a retryable
// (ReadWithinUncertaintyInterval) or TransactionAborted error; EndTransaction
// requests for txns previously marked in txnsToFail get a
// TransactionRetryError. Returns nil when nothing is injected.
func injectErrors(
	req roachpb.Request,
	hdr roachpb.Header,
	magicVals *filterVals,
) error {
	// The filter runs on server goroutines; all of magicVals' state is
	// protected by its embedded mutex.
	magicVals.Lock()
	defer magicVals.Unlock()
	switch req := req.(type) {
	case *roachpb.ConditionalPutRequest:
		for key, count := range magicVals.restartCounts {
			checkCorrectTxn(string(req.Value.RawBytes), magicVals, hdr.Txn)
			if count > 0 && bytes.Contains(req.Value.RawBytes, []byte(key)) {
				magicVals.restartCounts[key]--
				err := roachpb.NewReadWithinUncertaintyIntervalError(
					hlc.ZeroTimestamp, hlc.ZeroTimestamp)
				// Remember which txn we failed for this value so that
				// checkCorrectTxn can validate the retry attempt later.
				magicVals.failedValues[string(req.Value.RawBytes)] =
					failureRecord{err, hdr.Txn}
				return err
			}
		}
		for key, count := range magicVals.abortCounts {
			checkCorrectTxn(string(req.Value.RawBytes), magicVals, hdr.Txn)
			if count > 0 && bytes.Contains(req.Value.RawBytes, []byte(key)) {
				magicVals.abortCounts[key]--
				err := roachpb.NewTransactionAbortedError()
				magicVals.failedValues[string(req.Value.RawBytes)] =
					failureRecord{err, hdr.Txn}
				return err
			}
		}
		// If we're writing a value that's marked for an EndTransaction failure,
		// keep track of the txn id so we can fail it later on.
		for key, count := range magicVals.endTxnRestartCounts {
			if count > 0 && bytes.Contains(req.Value.RawBytes, []byte(key)) {
				txnID := *hdr.Txn.TxnMeta.ID
				if _, found := magicVals.txnsToFail[txnID]; found {
					// Already marked (e.g. a retried write); don't decrement again.
					continue
				}
				magicVals.endTxnRestartCounts[key]--
				magicVals.txnsToFail[txnID] = true
			}
		}
		return nil
	case *roachpb.EndTransactionRequest:
		txnID := *hdr.Txn.TxnMeta.ID
		if !magicVals.txnsToFail[txnID] {
			return nil
		}
		// Fail each marked txn exactly once at commit time.
		delete(magicVals.txnsToFail, txnID)
		// Note that we can't return TransactionAborted errors, although those are
		// more representative for the errors that EndTransaction might encounter,
		// because returning those would result in the txn's intents being left
		// around.
		return roachpb.NewTransactionRetryError()
	default:
		return nil
	}
}
// checkRestarts checks that there are no errors left to inject: every counter
// in magicVals must have reached zero and no txns may remain marked for
// failure. Fails the test fatally otherwise.
func checkRestarts(t *testing.T, magicVals *filterVals) {
	magicVals.Lock()
	defer magicVals.Unlock()
	for key, count := range magicVals.restartCounts {
		if count != 0 {
			// Report the caller's file:line since this helper is shared by tests.
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be retried %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.abortCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: INSERT for \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	for key, count := range magicVals.endTxnRestartCounts {
		if count != 0 {
			file, line, _ := caller.Lookup(1)
			t.Errorf("%s:%d: txn writing \"%s\" still has to be aborted %d times",
				file, line, key, count)
		}
	}
	if len(magicVals.txnsToFail) > 0 {
		file, line, _ := caller.Lookup(1)
		t.Errorf("%s:%d: txns still to be failed: %v", file, line, magicVals.txnsToFail)
	}
	// Escalate any accumulated Errorf calls to a fatal failure.
	if t.Failed() {
		t.Fatalf("checking error injection failed")
	}
}
// TestTxnRestart tests the logic in the sql executor for automatically retrying
// txns in case of retriable errors.
func TestTxnRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	// Disable one phase commits because they cannot be restarted.
	ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs).DisableOnePhaseCommits = true
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	// Make sure all the commands we send in this test are sent over the same connection.
	// This is a bit of a hack; in Go you're not supposed to have connection state
	// outside of using a db.Tx. But we can't use a db.Tx here, because we want
	// to control the batching of BEGIN/COMMIT statements.
	// This SetMaxOpenConns is pretty shady, it doesn't guarantee that you'll be using
	// the *same* one connection across calls. A proper solution would be to use a
	// lib/pq connection directly. As of Feb 2016, there's code in cli/sql_util.go to
	// do that.
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT, t DECIMAL);
`); err != nil {
		t.Fatal(err)
	}
	// Set up error injection that causes retries. Each magic value below tags
	// one of the INSERTs; the filter injects the corresponding number of
	// retriable/abort errors when it sees a write with that value.
	magicVals := createFilterVals(nil, nil)
	magicVals.restartCounts = map[string]int{
		"boulanger": 2,
		"dromedary": 2,
		"fajita":    2,
		"hooly":     2,
		"josephine": 2,
		"laureal":   2,
	}
	magicVals.abortCounts = map[string]int{
		"boulanger": 2,
	}
	magicVals.endTxnRestartCounts = map[string]int{
		"boulanger": 2,
		"dromedary": 2,
		"fajita":    2,
		"hooly":     2,
	}
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)
	// Test that implicit txns - txns for which we see all the statements and prefixes
	// of txns (statements batched together with the BEGIN stmt) - are retried.
	// We also exercise the SQL cluster logical timestamp in here, because
	// this must be properly propagated across retries.
	if _, err := sqlDB.Exec(`
INSERT INTO t.test (k, v, t) VALUES ('a', 'boulanger', cluster_logical_timestamp());
BEGIN;
INSERT INTO t.test (k, v, t) VALUES ('c', 'dromedary', cluster_logical_timestamp());
INSERT INTO t.test (k, v, t) VALUES ('e', 'fajita', cluster_logical_timestamp());
END;
INSERT INTO t.test (k, v, t) VALUES ('g', 'hooly', cluster_logical_timestamp());
BEGIN;
INSERT INTO t.test (k, v, t) VALUES ('i', 'josephine', cluster_logical_timestamp());
INSERT INTO t.test (k, v, t) VALUES ('k', 'laureal', cluster_logical_timestamp());
`); err != nil {
		t.Fatal(err)
	}
	cleanupFilter()
	checkRestarts(t, magicVals)
	// The batch above deliberately left the second BEGIN open; commit it now.
	if _, err := sqlDB.Exec("END;"); err != nil {
		t.Fatal(err)
	}
	// Check that the txns succeeded by reading the rows.
	var count int
	if err := sqlDB.QueryRow("SELECT COUNT(*) FROM t.test").Scan(&count); err != nil {
		t.Fatal(err)
	}
	if count != 6 {
		t.Fatalf("Expected 6 rows, got %d", count)
	}
	// Now test that we don't retry what we shouldn't: insert an error into a txn
	// we can't automatically retry (because it spans requests).
	magicVals = createFilterVals(nil, nil)
	magicVals.restartCounts = map[string]int{
		"hooly": 2,
	}
	cleanupFilter = cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)
	defer cleanupFilter()
	// Start a txn.
	if _, err := sqlDB.Exec(`
DELETE FROM t.test WHERE true;
BEGIN;
`); err != nil {
		t.Fatal(err)
	}
	// Continue the txn in a new request, which is not retriable; the injected
	// error must therefore surface to the client instead of being retried.
	_, err := sqlDB.Exec("INSERT INTO t.test (k, v, t) VALUES ('g', 'hooly', cluster_logical_timestamp())")
	if !testutils.IsError(
		err, "encountered previous write with future timestamp") {
		t.Errorf("didn't get expected injected error. Got: %s", err)
	}
}
// rollbackStrategy is the type of statement which a client can use to
// rollback aborted txns from retryable errors. We accept two statements
// for rolling back to the cockroach_restart savepoint. See
// *Executor.execStmtInAbortedTxn for more about transaction retries.
type rollbackStrategy int

const (
	rollbackToSavepoint rollbackStrategy = iota
	declareSavepoint
)

// SQLCommand returns the SQL statement text a client would send for this
// rollback strategy.
func (rs rollbackStrategy) SQLCommand() string {
	if rs == rollbackToSavepoint {
		return "ROLLBACK TO SAVEPOINT cockroach_restart"
	}
	if rs == declareSavepoint {
		return "SAVEPOINT cockroach_restart"
	}
	panic("unreachable")
}
// exec takes a closure and executes it repeatedly as long as it says it needs
// to be retried. The function also takes a rollback strategy, which specifies
// the statement which the client will use to rollback aborted txns from retryable
// errors.
// This function needs to be called from tests that set
// server.Context.TestingKnobs.ExecutorTestingKnobs.FixTxnPriority = true
func exec(t *testing.T, sqlDB *gosql.DB, rs rollbackStrategy, fn func(*gosql.Tx) bool) {
	txn, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// Declare the retry savepoint and pin a LOW priority so external
	// HIGH-priority txns can abort this one.
	if _, err := txn.Exec(
		"SAVEPOINT cockroach_restart; SET TRANSACTION PRIORITY LOW;"); err != nil {
		t.Fatal(err)
	}
	// Run the closure; while it reports a retryable failure, roll back to the
	// savepoint using the requested strategy and run it again.
	for retry := fn(txn); retry; retry = fn(txn) {
		if _, err := txn.Exec(rs.SQLCommand()); err != nil {
			t.Fatal(err)
		}
	}
	if err := txn.Commit(); err != nil {
		t.Fatal(err)
	}
}
// isRetryableErr returns whether the given error is a PG retryable error
// (SQLSTATE 40001, serialization_failure).
func isRetryableErr(err error) bool {
	if pqErr, ok := err.(*pq.Error); ok {
		return pqErr.Code == "40001"
	}
	return false
}
// runTestTxn runs one attempt of the txn body driven by exec() in
// TestTxnUserRestart. Returns true on retriable errors, i.e. when the caller
// should retry the txn. While injected errors are still pending the INSERT is
// expected to fail with expectedErr; once they are exhausted the sentinel row
// is written and the savepoint is released, with optionally one more error
// injected at RELEASE time via injectReleaseError.
func runTestTxn(t *testing.T, magicVals *filterVals, expectedErr string,
	injectReleaseError *bool, sqlDB *gosql.DB, tx *gosql.Tx) bool {
	// Errors are still being injected as long as either counter is positive.
	retriesNeeded :=
		(magicVals.restartCounts["boulanger"] + magicVals.abortCounts["boulanger"]) > 0
	var err error
	if retriesNeeded {
		_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (1, 'boulanger')")
		if !testutils.IsError(err, expectedErr) {
			t.Fatalf("expected to fail here. err: %s", err)
		}
		return isRetryableErr(err)
	}
	// Now the INSERT should succeed.
	_, err = tx.Exec("DELETE FROM t.test WHERE true; INSERT INTO t.test (k, v) VALUES (0, 'sentinel');")
	if err != nil {
		t.Fatal(err)
	}
	// Optionally abort the txn from the outside (once), so that the RELEASE
	// below fails and the client has to retry.
	retriesNeeded = *injectReleaseError
	if retriesNeeded {
		*injectReleaseError = false
		abortTxn(t, sqlDB, 0)
	}
	_, err = tx.Exec("RELEASE SAVEPOINT cockroach_restart")
	if retriesNeeded {
		if err == nil {
			t.Fatal("expected RELEASE to fail")
		}
	} else {
		if err != nil {
			t.Fatal(err)
		}
	}
	return isRetryableErr(err)
}
// abortTxn writes to a key and as a side effect aborts a txn that had an intent
// on that key.
// This cannot be done as an injected error, since we want the pusher to clean
// up the intents of the pushee.
// This function needs to be called from tests that set
// server.Context.TestingKnobs.ExecutorTestingKnobs.FixTxnPriority = true
func abortTxn(t *testing.T, sqlDB *gosql.DB, key int) {
	// Idiomatic short variable declaration instead of the previous
	// var-then-assign sequence.
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// HIGH priority makes this txn push (abort) the LOW-priority txn holding
	// an intent on the key.
	if _, err := tx.Exec("SET TRANSACTION PRIORITY HIGH"); err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("DELETE FROM t.test WHERE k = $1", key); err != nil {
		t.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}
}
// TestTxnUserRestart tests user-directed txn restarts.
// The test will inject and otherwise create retriable errors of various kinds
// and checks that we still manage to run a txn despite them.
func TestTxnUserRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	ctx.TestingKnobs.SQLExecutor = &sql.ExecutorTestingKnobs{FixTxnPriority: true}
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	// Set up error injection that causes retries.
	testCases := []struct {
		magicVals   *filterVals
		expectedErr string
	}{
		{
			magicVals: createFilterVals(
				map[string]int{"boulanger": 2}, // restartCounts
				nil),
			expectedErr: ".*encountered previous write with future timestamp.*",
		},
		{
			magicVals: createFilterVals(
				nil,
				map[string]int{"boulanger": 2}), // abortCounts
			expectedErr: ".*txn aborted.*",
		},
	}
	// Run every error-injection case with both accepted rollback strategies.
	for _, tc := range testCases {
		for _, rs := range []rollbackStrategy{rollbackToSavepoint, declareSavepoint} {
			cleanupFilter := cmdFilters.AppendFilter(
				func(args storagebase.FilterArgs) *roachpb.Error {
					if err := injectErrors(args.Req, args.Hdr, tc.magicVals); err != nil {
						return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
					}
					return nil
				}, false)
			// Also inject an error at RELEASE time, besides the error injected by magicVals.
			injectReleaseError := true
			commitCount := server.TestServer.MustGetSQLCounter("txn.commit.count")
			// This is the magic. Run the txn closure until all the retries are exhausted.
			exec(t, sqlDB, rs, func(tx *gosql.Tx) bool {
				return runTestTxn(t, tc.magicVals, tc.expectedErr, &injectReleaseError, sqlDB, tx)
			})
			checkRestarts(t, tc.magicVals)
			// Check that we only wrote the sentinel row.
			rows, err := sqlDB.Query("SELECT * FROM t.test")
			if err != nil {
				t.Fatal(err)
			}
			for rows.Next() {
				var k int
				var v string
				err = rows.Scan(&k, &v)
				if err != nil {
					t.Fatal(err)
				}
				if k != 0 || v != "sentinel" {
					t.Fatalf("didn't find expected row: %d %s", k, v)
				}
			}
			// Check that the commit counter was incremented. It could have been
			// incremented by more than 1 because of the transactions we use to force
			// aborts, plus who knows what else the server is doing in the background.
			checkCounterGE(t, server, "txn.commit.count", commitCount+1)
			// Clean up the table for the next test iteration.
			_, err = sqlDB.Exec("DELETE FROM t.test WHERE true")
			if err != nil {
				t.Fatal(err)
			}
			rows.Close()
			cleanupFilter()
		}
	}
}
// TestCommitWaitState tests that random commands while in the COMMIT_WAIT
// state return a particular error.
func TestCommitWaitState(t *testing.T) {
	defer leaktest.AfterTest(t)()
	server, sqlDB, _ := setup(t)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// RELEASE after the retry savepoint commits the txn, putting the session
	// into COMMIT_WAIT.
	if _, err := tx.Exec(
		"SAVEPOINT cockroach_restart; RELEASE cockroach_restart;"); err != nil {
		t.Fatal(err)
	}
	// Any further statement must be rejected as the txn is already committed.
	_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'sentinel');")
	if !testutils.IsError(err, "current transaction is committed") {
		t.Fatal(err)
	}
	// Rollback should respond with a COMMIT command tag.
	err = tx.Rollback()
	if !testutils.IsError(err, "unexpected command tag COMMIT") {
		t.Fatal(err)
	}
}
// TestErrorOnCommitResultsInRollback tests that if there's an error on COMMIT
// that needs to be reported to the user the txn will be rolled back.
// As opposed to an error on a COMMIT in an auto-retry txn, where we retry the
// txn (not tested here).
func TestErrorOnCommitResultsInRollback(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := server.MakeTestContext()
	ctx.TestingKnobs.SQLExecutor = &sql.ExecutorTestingKnobs{FixTxnPriority: true}
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart; SET TRANSACTION PRIORITY LOW;"); err != nil {
		t.Fatal(err)
	}
	if _, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'sentinel');"); err != nil {
		t.Fatal(err)
	}
	// Abort the txn from the outside so the COMMIT below must fail.
	abortTxn(t, sqlDB, 0)
	if err = tx.Commit(); err == nil {
		// Bug fix: the old message ("unexpected error: <nil>") formatted a nil
		// error; this branch actually means the commit unexpectedly succeeded.
		t.Fatal("expected COMMIT to fail, but it succeeded")
	}
	// Check that there's no error reading and we don't see any rows.
	var rows *gosql.Rows
	if rows, err = sqlDB.Query("SELECT * FROM t.test"); err != nil {
		t.Fatal(err)
	}
	if rows.Next() {
		var k int
		var v string
		_ = rows.Scan(&k, &v)
		t.Fatalf("found unexpected row: %d %s", k, v)
	}
	rows.Close()
}
// TestCommitFinalizesTxnOnError tests that a COMMIT getting an error,
// retryable or not, leaves the txn finalized and not in Aborted/RestartWait
// (i.e. COMMIT, like ROLLBACK, is always final).
func TestCommitFinalizesTxnOnError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	// We need to do everything on one connection as we'll want to observe the
	// connection state after a COMMIT.
	sqlDB.SetMaxOpenConns(1)
	// Set up error injection that causes retries.
	magicVals := createFilterVals(nil, nil)
	magicVals.endTxnRestartCounts = map[string]int{
		"boulanger": 1000, // restart many times, for all the tests below
	}
	defer cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)()
	// We're going to test both errors that would leave the transaction in the
	// RestartWait state and errors that would leave the transaction in Aborted,
	// if they were to happen on any other statement than COMMIT.
	// We do that by always injecting a retryable error at COMMIT, but once in a
	// txn that had a "retry intent" (SAVEPOINT cockroach_restart), and once in a
	// txn without it.
	testCases := []struct {
		retryIntent bool
	}{
		{false},
		{true},
	}
	for _, tc := range testCases {
		if _, err := sqlDB.Exec("BEGIN;"); err != nil {
			t.Fatal(err)
		}
		if tc.retryIntent {
			if _, err := sqlDB.Exec("SAVEPOINT cockroach_restart;"); err != nil {
				t.Fatal(err)
			}
		}
		if _, err := sqlDB.Exec("INSERT INTO t.test (k, v) VALUES (0, 'boulanger');"); err != nil {
			t.Fatal(err)
		}
		// The injected error surfaces at COMMIT as a retryable error.
		if _, err := sqlDB.Exec("COMMIT;"); !testutils.IsError(err, "pq: restart transaction") {
			t.Fatalf("unexpected error: %v", err)
		}
		// Check that we can start another txn on the (one and only) connection.
		if _, err := sqlDB.Exec("BEGIN;END;"); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRollbackToSavepointStatement tests that issuing a RESTART outside of a
// txn produces the proper error.
func TestRollbackToSavepointStatement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	server, sqlDB, _ := setup(t)
	defer cleanup(server, sqlDB)
	// ROLLBACK TO SAVEPOINT without a transaction
	_, err := sqlDB.Exec("ROLLBACK TO SAVEPOINT cockroach_restart")
	if !testutils.IsError(err, "the transaction is not in a retriable state") {
		t.Fatal("expected to fail here. err: ", err)
	}
	// ROLLBACK TO SAVEPOINT with a wrong name
	_, err = sqlDB.Exec("ROLLBACK TO SAVEPOINT foo")
	if !testutils.IsError(err, "SAVEPOINT not supported except for COCKROACH_RESTART") {
		t.Fatal("expected to fail here. err: ", err)
	}
	// ROLLBACK TO SAVEPOINT in a non-retriable transaction
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	if _, err = tx.Exec("BOGUS SQL STATEMENT"); err == nil {
		// Bug fix: the old message formatted a nil error ("err: %!s(<nil>)");
		// this branch means the bogus statement unexpectedly succeeded.
		t.Fatal("expected BOGUS SQL STATEMENT to fail, but it succeeded")
	}
	_, err = tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart")
	if !testutils.IsError(err,
		"SAVEPOINT COCKROACH_RESTART has not been used or a non-retriable error was encountered") {
		t.Fatal("expected to fail here. err: ", err)
	}
}
// TestNonRetriableError checks that a non-retriable error (e.g. duplicate key)
// doesn't leave the txn in a restartable state.
func TestNonRetriableError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	server, sqlDB, _ := setup(t)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	var tx *gosql.Tx
	var err error
	if tx, err = sqlDB.Begin(); err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	if _, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'test')"); err != nil {
		t.Fatal(err)
	}
	// Insert the same primary key again to produce a non-retriable
	// duplicate-key error.
	_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'test');")
	if !testutils.IsError(err, "duplicate key value") {
		t.Errorf("expected duplicate key error. Got: %s", err)
	}
	// The txn must now be aborted (not restartable), so ROLLBACK TO SAVEPOINT
	// is rejected.
	if _, err := tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart"); !testutils.IsError(
		err, "current transaction is aborted, commands ignored until end of "+
			"transaction block; SAVEPOINT COCKROACH_RESTART has not been used or a "+
			"non-retriable error was encountered.") {
		t.Fatal(err)
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
}
// TestRollbackInRestartWait ensures that a ROLLBACK while the txn is in the
// RetryWait state works.
func TestRollbackInRestartWait(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}
	// Set up error injection that causes retries: one injected restart at
	// EndTransaction time for the 'boulanger' write.
	magicVals := createFilterVals(nil, nil)
	magicVals.endTxnRestartCounts = map[string]int{
		"boulanger": 1,
	}
	defer cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if err := injectErrors(args.Req, args.Hdr, magicVals); err != nil {
				return roachpb.NewErrorWithTxn(err, args.Hdr.Txn)
			}
			return nil
		}, false)()
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec(
		"INSERT INTO t.test (k, v) VALUES ('g', 'boulanger')"); err != nil {
		t.Fatal(err)
	}
	// RELEASE fails because of the injected restart, leaving the txn in the
	// RestartWait state.
	if _, err := tx.Exec("RELEASE SAVEPOINT cockroach_restart"); err == nil {
		t.Fatal("expected RELEASE to fail")
	}
	// A ROLLBACK from RestartWait must succeed.
	if err := tx.Rollback(); err != nil {
		t.Fatal(err)
	}
}
// TestNonRetryableError verifies that a non-retryable error is propagated to the client.
func TestNonRetryableError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	testKey := []byte("test_key")
	hitError := false
	// Inject a plain (non-retryable) error on any scan that touches testKey.
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if req, ok := args.Req.(*roachpb.ScanRequest); ok {
				if bytes.Contains(req.Key, testKey) {
					hitError = true
					return roachpb.NewErrorWithTxn(fmt.Errorf("testError"), args.Hdr.Txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()
	// We need to do everything on one connection as we'll want to observe the
	// connection state after a COMMIT.
	sqlDB.SetMaxOpenConns(1)
	// The final SELECT triggers the filter above; its error must reach the client.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT);
INSERT INTO t.test (k, v) VALUES ('test_key', 'test_val');
SELECT * from t.test WHERE k = 'test_key';
`); !testutils.IsError(err, "pq: testError") {
		t.Errorf("unexpected error %s", err)
	}
	if !hitError {
		t.Errorf("expected to hit error, but it didn't happen")
	}
}
// TestNonRetryableErrorFromCommit verifies that a non-retryable error from the
// execution of EndTransactionRequests is propagated to the client.
// (Comment previously misnamed this function TestNonRetryableError.)
func TestNonRetryableErrorFromCommit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx, cmdFilters := createTestServerContext()
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	hitError := false
	// Inject a plain error on the EndTransaction that covers the descriptor ID
	// generator key (hit by CREATE DATABASE below).
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if req, ok := args.Req.(*roachpb.EndTransactionRequest); ok {
				if bytes.Contains(req.Key, []byte(keys.DescIDGenerator)) {
					hitError = true
					return roachpb.NewErrorWithTxn(fmt.Errorf("testError"), args.Hdr.Txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()
	if _, err := sqlDB.Exec("CREATE DATABASE t;"); !testutils.IsError(err, "pq: testError") {
		t.Errorf("unexpected error %s", err)
	}
	if !hitError {
		t.Errorf("expected to hit error, but it didn't happen")
	}
}
// TestTxnDeadline verifies that a read-only transaction that triggers a
// deadline-exceeded error finishes without causing an Executor error.
// In particular, this test case creates a read-only txn that elides
// EndTransactionRequest and makes sure a deadline-exceeded error causes a
// retryable error.
//
// This test triggers the above scenario by making ReadWithinUncertaintyIntervalError advance
// the clock, so that the transaction timestamp exceeds the deadline of the EndTransactionRequest.
func TestTxnDeadline(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("#7394")
	var cmdFilters CommandFilters
	cmdFilters.AppendFilter(checkEndTransactionTrigger, true)
	restartDone := false
	testKey := []byte("test_key")
	testingKnobs := &storage.StoreTestingKnobs{
		TestingCommandFilter: cmdFilters.runFilters,
		ClockBeforeSend: func(c *hlc.Clock, ba roachpb.BatchRequest) {
			if restartDone {
				return
			}
			// Hack to advance the transaction timestamp on a transaction restart.
			for _, union := range ba.Requests {
				if req, ok := union.GetInner().(*roachpb.ScanRequest); ok {
					if bytes.Contains(req.Key, testKey) {
						now := c.Now()
						now.WallTime += int64(5 * sql.LeaseDuration)
						c.Update(now)
						break
					}
				}
			}
		},
	}
	ctx := server.MakeTestContext()
	ctx.TestingKnobs.Store = testingKnobs
	server, sqlDB, _ := setupWithContext(t, &ctx)
	defer cleanup(server, sqlDB)
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if restartDone {
				return nil
			}
			if req, ok := args.Req.(*roachpb.ScanRequest); ok {
				if bytes.Contains(req.Key, testKey) {
					restartDone = true
					// Return ReadWithinUncertaintyIntervalError to update the transaction timestamp on retry.
					txn := args.Hdr.Txn
					txn.ResetObservedTimestamps()
					now := server.Clock().Now()
					txn.UpdateObservedTimestamp(server.Gossip().GetNodeID(), now)
					return roachpb.NewErrorWithTxn(roachpb.NewReadWithinUncertaintyIntervalError(now, now), txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()
	// Use a large max offset to avoid rejecting a transaction whose timestamp is in
	// future (as we will advance the transaction timestamp with ReadWithinUncertaintyIntervalError).
	server.Clock().SetMaxOffset(sql.LeaseDuration * 10)
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k TEXT PRIMARY KEY, v TEXT);
INSERT INTO t.test (k, v) VALUES ('test_key', 'test_val');
`); err != nil {
		t.Fatal(err)
	}
	// Acquire the lease and enable the auto-retry. The first read attempt will trigger ReadWithinUncertaintyIntervalError
	// and advance the transaction timestamp. The second read attempt will succeed, but the (elided) EndTransactionRequest
	// hits a deadline-exceeded error.
	if _, err := sqlDB.Exec(`
SELECT * from t.test WHERE k = 'test_key';
`); err != nil {
		t.Fatal(err)
	}
	if !restartDone {
		t.Errorf("expected restart, but it didn't happen")
	}
}
|
package main
import (
"testing"
"fmt"
"net"
"strings"
)
// testHost is the host every test in this file listens and pings on.
const testHost = "localhost"

// testPort is the fixed port used by the tests.
// TODO hopefully unused. Better ideas?
const testPort = "1234"

// knownNonexistentHost is a hostname assumed never to resolve.
// NOTE(review): relies on external DNS behavior — confirm it stays unresolvable.
const knownNonexistentHost = "nonexistent.janosgyerik.com"
// acceptN listens on host:port, signals readiness on ready, then accepts and
// immediately closes count connections.
// It is meant to run on its own goroutine, so failures are reported with
// t.Error instead of t.Fatal: per the testing package docs, FailNow/Fatal must
// be called from the goroutine running the test function.
func acceptN(t *testing.T, host, port string, count int, ready chan bool) {
	ln, err := net.Listen("tcp", net.JoinHostPort(host, port))
	if err != nil {
		t.Error(err)
		// Unblock the caller waiting on <-ready even on failure; previously a
		// failed Listen deadlocked the test goroutine.
		ready <- true
		return
	}
	defer ln.Close()
	ready <- true
	for i := 0; i < count; i++ {
		conn, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		conn.Close()
	}
}
// assertPingResult pings host:port and checks that the outcome (open/closed)
// matches expected; if pattern is non-empty, the error text must contain it.
func assertPingResult(t *testing.T, host, port string, expected bool, pattern string) {
	err := Ping(host, port)
	addr := net.JoinHostPort(host, port)
	t.Logf("port ping %s -> %v", addr, err)
	actual := err == nil
	if expected != actual {
		var openOrClosed string
		if expected {
			openOrClosed = "open"
		} else {
			openOrClosed = "closed"
		}
		t.Errorf("%s should be %s", addr, openOrClosed)
	}
	if pattern != "" {
		// Bug fix: err.Error() was called unconditionally here, panicking with
		// a nil pointer dereference whenever a pattern was given but the ping
		// unexpectedly succeeded (err == nil).
		if err == nil {
			t.Errorf("the result was expected to contain %s, but there was no error", pattern)
		} else if errstr := err.Error(); !strings.Contains(errstr, pattern) {
			t.Errorf("the result was expected to contain %s, but was: %s", pattern, errstr)
		}
	}
}
// assertPingSuccess asserts that pinging host:port succeeds.
func assertPingSuccess(t *testing.T, host, port string) {
	assertPingResult(t, host, port, true, "")
}

// assertPingFailure asserts that pinging host:port fails with an error
// containing pattern.
func assertPingFailure(t *testing.T, host, port string, pattern string) {
	assertPingResult(t, host, port, false, pattern)
}
// assertPingNSuccessCount runs PingN for pingCount attempts and asserts that
// exactly expectedSuccessCount of them succeed.
func assertPingNSuccessCount(t *testing.T, host, port string, pingCount int, expectedSuccessCount int) {
	// PingN reports one result per attempt on the channel.
	c := make(chan error)
	go PingN(host, port, pingCount, c)
	addr := net.JoinHostPort(host, port)
	successCount := 0
	for i := 0; i < pingCount; i++ {
		err := <-c
		t.Logf("port ping %s [%d] -> %v", addr, i+1, err)
		if err == nil {
			successCount++
		}
	}
	if expectedSuccessCount != successCount {
		t.Errorf("expected %d successful pings, but got only %d", expectedSuccessCount, successCount)
	}
}
// Test_ping_open_port pings a port with a live listener, then verifies the
// listener is gone after serving its single connection.
func Test_ping_open_port(t *testing.T) {
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, 1, ready)
	<-ready
	assertPingSuccess(t, testHost, testPort)
	// for sanity: acceptN should have shut down already
	assertPingFailure(t, testHost, testPort, "connection refused")
}
// Test_ping_unopen_port expects connection refused when nothing listens on testPort.
func Test_ping_unopen_port(t *testing.T) {
	assertPingFailure(t, testHost, testPort, "connection refused")
}

// Test_ping_nonexistent_host expects a DNS resolution failure.
func Test_ping_nonexistent_host(t *testing.T) {
	assertPingFailure(t, knownNonexistentHost, testPort, "no such host")
}

// Test_ping_negative_port expects port validation to reject -1.
func Test_ping_negative_port(t *testing.T) {
	assertPingFailure(t, testHost, "-1", "invalid port")
}

// Test_ping_too_high_port expects port validation to reject values above 65535.
func Test_ping_too_high_port(t *testing.T) {
	assertPingFailure(t, testHost, "123456", "invalid port")
}
// Test_ping5_all_success pings a listener that accepts every attempt.
// NOTE(review): the name says 5 but pingCount is 3 — confirm which is intended.
func Test_ping5_all_success(t *testing.T) {
	pingCount := 3
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, pingCount, ready)
	<-ready
	assertPingNSuccessCount(t, testHost, testPort, pingCount, pingCount)
}

// Test_ping5_all_fail pings with no listener; every attempt must fail.
func Test_ping5_all_fail(t *testing.T) {
	pingCount := 5
	successCount := 0
	assertPingNSuccessCount(t, testHost, testPort, pingCount, successCount)
}

// Test_ping5_partial_success serves only the first 3 of 5 attempts; the
// remaining 2 must fail.
func Test_ping5_partial_success(t *testing.T) {
	successCount := 3
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, successCount, ready)
	<-ready
	pingCount := 5
	assertPingNSuccessCount(t, testHost, testPort, pingCount, successCount)
}
// assertFormatResult pings host:port and asserts the FormatResult rendering of
// the outcome equals expected.
func assertFormatResult(t *testing.T, host, port string, expected string) {
	actual := FormatResult(Ping(host, port))
	if expected != actual {
		t.Errorf("expected '%s' but got '%s'", expected, actual)
	}
}
// Test_format_result_success expects "success" for an open port.
func Test_format_result_success(t *testing.T) {
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, 1, ready)
	<-ready
	assertFormatResult(t, testHost, testPort, "success")
}

// Test_format_result_connection_refused expects the raw dial error text.
// NOTE(review): the "getsockopt:" prefix is OS/Go-version dependent — confirm.
func Test_format_result_connection_refused(t *testing.T) {
	assertFormatResult(t, testHost, testPort, "getsockopt: connection refused")
}

// Test_format_result_invalid_port_m1 expects a validation message for port -1.
func Test_format_result_invalid_port_m1(t *testing.T) {
	port := "-1"
	assertFormatResult(t, testHost, port, fmt.Sprintf("invalid port %s", port))
}

// Test_format_result_invalid_port_123456 expects a validation message for an
// out-of-range port.
func Test_format_result_invalid_port_123456(t *testing.T) {
	port := "123456"
	assertFormatResult(t, testHost, port, fmt.Sprintf("invalid port %s", port))
}

// Test_format_result_nonexistent_host expects the DNS lookup error text.
func Test_format_result_nonexistent_host(t *testing.T) {
	host := knownNonexistentHost
	assertFormatResult(t, host, testPort, fmt.Sprintf("lookup %s: no such host", host))
}
Flipped comparisons to actual-vs-expected operand order.
package main
import (
"testing"
"fmt"
"net"
"strings"
)
// testHost is the host every test in this file listens and pings on.
const testHost = "localhost"

// testPort is the fixed port used by the tests.
// TODO hopefully unused. Better ideas?
const testPort = "1234"

// knownNonexistentHost is a hostname assumed never to resolve.
// NOTE(review): relies on external DNS behavior — confirm it stays unresolvable.
const knownNonexistentHost = "nonexistent.janosgyerik.com"
// acceptN listens on host:port, signals readiness on ready, then accepts and
// immediately closes count connections.
// It is meant to run on its own goroutine, so failures are reported with
// t.Error instead of t.Fatal: per the testing package docs, FailNow/Fatal must
// be called from the goroutine running the test function.
func acceptN(t *testing.T, host, port string, count int, ready chan bool) {
	ln, err := net.Listen("tcp", net.JoinHostPort(host, port))
	if err != nil {
		t.Error(err)
		// Unblock the caller waiting on <-ready even on failure; previously a
		// failed Listen deadlocked the test goroutine.
		ready <- true
		return
	}
	defer ln.Close()
	ready <- true
	for i := 0; i < count; i++ {
		conn, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		conn.Close()
	}
}
// assertPingResult pings host:port and checks that the outcome (open/closed)
// matches expected; if pattern is non-empty, the error text must contain it.
func assertPingResult(t *testing.T, host, port string, expected bool, pattern string) {
	err := Ping(host, port)
	addr := net.JoinHostPort(host, port)
	t.Logf("port ping %s -> %v", addr, err)
	actual := err == nil
	if actual != expected {
		var openOrClosed string
		if expected {
			openOrClosed = "open"
		} else {
			openOrClosed = "closed"
		}
		t.Errorf("%s should be %s", addr, openOrClosed)
	}
	if pattern != "" {
		// Bug fix: err.Error() was called unconditionally here, panicking with
		// a nil pointer dereference whenever a pattern was given but the ping
		// unexpectedly succeeded (err == nil).
		if err == nil {
			t.Errorf("the result was expected to contain %s, but there was no error", pattern)
		} else if errstr := err.Error(); !strings.Contains(errstr, pattern) {
			t.Errorf("the result was expected to contain %s, but was: %s", pattern, errstr)
		}
	}
}
// assertPingSuccess asserts that pinging host:port succeeds.
func assertPingSuccess(t *testing.T, host, port string) {
	assertPingResult(t, host, port, true, "")
}

// assertPingFailure asserts that pinging host:port fails with an error
// containing pattern.
func assertPingFailure(t *testing.T, host, port string, pattern string) {
	assertPingResult(t, host, port, false, pattern)
}
// assertPingNSuccessCount runs PingN for pingCount attempts and asserts that
// exactly expectedSuccessCount of them succeed.
func assertPingNSuccessCount(t *testing.T, host, port string, pingCount int, expectedSuccessCount int) {
	// PingN reports one result per attempt on the channel.
	c := make(chan error)
	go PingN(host, port, pingCount, c)
	addr := net.JoinHostPort(host, port)
	successCount := 0
	for i := 0; i < pingCount; i++ {
		err := <-c
		t.Logf("port ping %s [%d] -> %v", addr, i+1, err)
		if err == nil {
			successCount++
		}
	}
	if expectedSuccessCount != successCount {
		t.Errorf("expected %d successful pings, but got only %d", expectedSuccessCount, successCount)
	}
}
// Test_ping_open_port pings a port with a live listener, then verifies the
// listener is gone after serving its single connection.
func Test_ping_open_port(t *testing.T) {
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, 1, ready)
	<-ready
	assertPingSuccess(t, testHost, testPort)
	// for sanity: acceptN should have shut down already
	assertPingFailure(t, testHost, testPort, "connection refused")
}
// Test_ping_unopen_port expects connection refused when nothing listens on testPort.
func Test_ping_unopen_port(t *testing.T) {
	assertPingFailure(t, testHost, testPort, "connection refused")
}

// Test_ping_nonexistent_host expects a DNS resolution failure.
func Test_ping_nonexistent_host(t *testing.T) {
	assertPingFailure(t, knownNonexistentHost, testPort, "no such host")
}

// Test_ping_negative_port expects port validation to reject -1.
func Test_ping_negative_port(t *testing.T) {
	assertPingFailure(t, testHost, "-1", "invalid port")
}

// Test_ping_too_high_port expects port validation to reject values above 65535.
func Test_ping_too_high_port(t *testing.T) {
	assertPingFailure(t, testHost, "123456", "invalid port")
}
// Test_ping5_all_success pings a listener that accepts every attempt.
// NOTE(review): the name says 5 but pingCount is 3 — confirm which is intended.
func Test_ping5_all_success(t *testing.T) {
	pingCount := 3
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, pingCount, ready)
	<-ready
	assertPingNSuccessCount(t, testHost, testPort, pingCount, pingCount)
}

// Test_ping5_all_fail pings with no listener; every attempt must fail.
func Test_ping5_all_fail(t *testing.T) {
	pingCount := 5
	successCount := 0
	assertPingNSuccessCount(t, testHost, testPort, pingCount, successCount)
}

// Test_ping5_partial_success serves only the first 3 of 5 attempts; the
// remaining 2 must fail.
func Test_ping5_partial_success(t *testing.T) {
	successCount := 3
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, successCount, ready)
	<-ready
	pingCount := 5
	assertPingNSuccessCount(t, testHost, testPort, pingCount, successCount)
}
// assertFormatResult pings host:port and asserts the FormatResult rendering of
// the outcome equals expected.
func assertFormatResult(t *testing.T, host, port string, expected string) {
	actual := FormatResult(Ping(host, port))
	if actual != expected {
		t.Errorf("expected '%s' but got '%s'", expected, actual)
	}
}
// Test_format_result_success expects "success" for an open port.
func Test_format_result_success(t *testing.T) {
	ready := make(chan bool)
	go acceptN(t, testHost, testPort, 1, ready)
	<-ready
	assertFormatResult(t, testHost, testPort, "success")
}

// Test_format_result_connection_refused expects the raw dial error text.
// NOTE(review): the "getsockopt:" prefix is OS/Go-version dependent — confirm.
func Test_format_result_connection_refused(t *testing.T) {
	assertFormatResult(t, testHost, testPort, "getsockopt: connection refused")
}

// Test_format_result_invalid_port_m1 expects a validation message for port -1.
func Test_format_result_invalid_port_m1(t *testing.T) {
	port := "-1"
	assertFormatResult(t, testHost, port, fmt.Sprintf("invalid port %s", port))
}

// Test_format_result_invalid_port_123456 expects a validation message for an
// out-of-range port.
func Test_format_result_invalid_port_123456(t *testing.T) {
	port := "123456"
	assertFormatResult(t, testHost, port, fmt.Sprintf("invalid port %s", port))
}

// Test_format_result_nonexistent_host expects the DNS lookup error text.
func Test_format_result_nonexistent_host(t *testing.T) {
	host := knownNonexistentHost
	assertFormatResult(t, host, testPort, fmt.Sprintf("lookup %s: no such host", host))
}
|
package peerwriter
import (
"bytes"
"container/list"
"encoding/binary"
"io"
"net"
"time"
"github.com/cenkalti/rain/internal/logger"
"github.com/cenkalti/rain/internal/peerconn/peerreader"
"github.com/cenkalti/rain/internal/peerprotocol"
)
// keepAlivePeriod is the target interval between keep-alive frames; the
// messageWriter ticks at half this period.
const keepAlivePeriod = 2 * time.Minute
// PeerWriter serializes peer-protocol messages and writes them to a peer
// connection. Run drives the queue; messageWriter performs the actual writes.
type PeerWriter struct {
	conn       net.Conn                          // peer connection; closed when messageWriter exits
	queueC     chan peerprotocol.Message         // incoming messages from SendMessage/SendPiece
	cancelC    chan peerprotocol.CancelMessage   // cancellation requests from CancelRequest
	writeQueue *list.List                        // pending messages, drained front-first
	writeC     chan peerprotocol.Message         // hands queue head to messageWriter
	messages   chan interface{}                  // outgoing events (e.g. BlockUploaded)
	log        logger.Logger
	stopC      chan struct{}                     // closed by Stop to shut everything down
	doneC      chan struct{}                     // closed when Run returns
}
// New creates a PeerWriter that writes protocol messages to conn, logging
// through l. Run must be called before the writer processes anything.
func New(conn net.Conn, l logger.Logger) *PeerWriter {
	w := &PeerWriter{
		conn:       conn,
		log:        l,
		writeQueue: list.New(),
		queueC:     make(chan peerprotocol.Message),
		cancelC:    make(chan peerprotocol.CancelMessage),
		writeC:     make(chan peerprotocol.Message),
		messages:   make(chan interface{}),
		stopC:      make(chan struct{}),
		doneC:      make(chan struct{}),
	}
	return w
}
// Messages returns the channel on which writer events (e.g. BlockUploaded)
// are delivered.
func (p *PeerWriter) Messages() <-chan interface{} {
	return p.messages
}

// SendMessage queues msg for writing. It is a no-op once the writer is done.
func (p *PeerWriter) SendMessage(msg peerprotocol.Message) {
	select {
	case p.queueC <- msg:
	case <-p.doneC:
	}
}

// SendPiece queues a piece message answering the given request, reading the
// payload from pi. It is a no-op once the writer is done.
func (p *PeerWriter) SendPiece(msg peerprotocol.RequestMessage, pi io.ReaderAt) {
	m := Piece{Piece: pi, Index: msg.Index, Begin: msg.Begin, Length: msg.Length}
	select {
	case p.queueC <- m:
	case <-p.doneC:
	}
}

// CancelRequest removes a matching queued piece message, if still unsent.
func (p *PeerWriter) CancelRequest(msg peerprotocol.CancelMessage) {
	select {
	case p.cancelC <- msg:
	case <-p.doneC:
	}
}

// Stop asks the writer loops to terminate.
func (p *PeerWriter) Stop() {
	close(p.stopC)
}

// Done returns a channel that is closed when Run has returned.
func (p *PeerWriter) Done() chan struct{} {
	return p.doneC
}
// Run is the writer's main loop. It multiplexes between accepting newly
// queued messages, feeding the head of the queue to the messageWriter
// goroutine, and handling cancellations, until Stop is called.
func (p *PeerWriter) Run() {
	defer close(p.doneC)
	go p.messageWriter()
	for {
		var (
			e      *list.Element
			msg    peerprotocol.Message
			writeC chan peerprotocol.Message
		)
		// writeC stays nil while the queue is empty, which disables the send
		// case in the select below (a send on a nil channel never proceeds).
		if p.writeQueue.Len() > 0 {
			e = p.writeQueue.Front()
			msg = e.Value.(peerprotocol.Message)
			writeC = p.writeC
		}
		select {
		case msg = <-p.queueC:
			p.queueMessage(msg)
		case writeC <- msg:
			p.writeQueue.Remove(e)
		case cm := <-p.cancelC:
			p.cancelRequest(cm)
		case <-p.stopC:
			return
		}
	}
}
// queueMessage appends msg to the write queue. Queuing a choke drops all
// pending piece messages first, since the peer will discard them anyway.
func (p *PeerWriter) queueMessage(msg peerprotocol.Message) {
	if _, ok := msg.(peerprotocol.ChokeMessage); ok {
		p.cancelQueuedPieceMessages()
	}
	p.writeQueue.PushBack(msg)
}

// cancelQueuedPieceMessages removes every queued Piece message.
// next is captured before Remove so iteration survives element removal.
func (p *PeerWriter) cancelQueuedPieceMessages() {
	var next *list.Element
	for e := p.writeQueue.Front(); e != nil; e = next {
		next = e.Next()
		if _, ok := e.Value.(Piece); ok {
			p.writeQueue.Remove(e)
		}
	}
}

// cancelRequest removes the first queued Piece matching the cancel message's
// index/begin/length triple, if any.
func (p *PeerWriter) cancelRequest(cm peerprotocol.CancelMessage) {
	for e := p.writeQueue.Front(); e != nil; e = e.Next() {
		if pi, ok := e.Value.(Piece); ok && pi.Index == cm.Index && pi.Begin == cm.Begin && pi.Length == cm.Length {
			p.writeQueue.Remove(e)
			break
		}
	}
}
// messageWriter is the goroutine that frames messages received on writeC
// ([length][id][payload]) and writes them to the connection, interleaved with
// periodic keep-alive frames. It closes the connection on exit.
func (p *PeerWriter) messageWriter() {
	defer p.conn.Close()
	// Disable write deadline that is previously set by handshaker.
	err := p.conn.SetWriteDeadline(time.Time{})
	// *net.OpError is treated as an expected connection-level failure and
	// logged at debug level; anything else is logged as an error.
	if _, ok := err.(*net.OpError); ok {
		p.log.Debugln("cannot set deadline:", err)
		return
	}
	if err != nil {
		p.log.Error(err)
		return
	}
	keepAliveTicker := time.NewTicker(keepAlivePeriod / 2)
	defer keepAliveTicker.Stop()
	// Use a fixed-size array for slice storage.
	// Length is calculated for a piece message at max block size.
	// Length = 4 bytes length + 1 byte messageID + 8 bytes piece header + <MaxBlockSize> piece data
	// This will reduce allocations in loop below.
	var a [4 + 1 + 8 + peerreader.MaxBlockSize]byte
	b := a[:0]
	for {
		select {
		case msg := <-p.writeC:
			// p.log.Debugf("writing message of type: %q", msg.ID())
			buf := bytes.NewBuffer(b)
			// Reserve space for length and message ID
			buf.Write([]byte{0, 0, 0, 0, 0})
			if wt, ok := msg.(io.WriterTo); ok {
				_, err = wt.WriteTo(buf)
			} else {
				_, err = buf.ReadFrom(msg)
			}
			if err != nil {
				p.log.Errorf("cannot serialize message [%v]: %s", msg.ID(), err.Error())
				return
			}
			// Put length
			binary.BigEndian.PutUint32(buf.Bytes()[:4], uint32(1+buf.Len()-5))
			// Put message ID
			buf.Bytes()[4] = uint8(msg.ID())
			n, err := p.conn.Write(buf.Bytes())
			// Report upload accounting even on partial/failed writes, using
			// the byte count actually written.
			if _, ok := msg.(Piece); ok {
				p.countUploadBytes(msg, n)
			}
			if _, ok := err.(*net.OpError); ok {
				p.log.Debugf("cannot write message [%v]: %s", msg.ID(), err.Error())
				return
			}
			if err != nil {
				p.log.Errorf("cannot write message [%v]: %s", msg.ID(), err.Error())
				return
			}
		case <-keepAliveTicker.C:
			// A keep-alive is a zero-length frame.
			_, err := p.conn.Write([]byte{0, 0, 0, 0})
			if _, ok := err.(*net.OpError); ok {
				p.log.Debugf("cannot write keepalive message: %s", err.Error())
				return
			}
			if err != nil {
				p.log.Errorf("cannot write keepalive message: %s", err.Error())
				return
			}
		case <-p.stopC:
			return
		}
	}
}
// countUploadBytes reports the payload portion of a written piece
// message on the messages channel for upload accounting.
// The message parameter is unused; it is kept (blank-named) so the
// call site's arity is unchanged while silencing the linter.
func (p *PeerWriter) countUploadBytes(_ peerprotocol.Message, n int) {
	// Exclude frame overhead: 4 length + 1 id + 8 piece header bytes.
	n -= 13 // message + piece header
	if n < 0 {
		n = 0
	}
	uploaded := uint32(n)
	if uploaded > 0 {
		select {
		case p.messages <- BlockUploaded{Length: uploaded}:
		case <-p.stopC:
		}
	}
}
Fix linter error: drop the unused message parameter from countUploadBytes.
package peerwriter
import (
"bytes"
"container/list"
"encoding/binary"
"io"
"net"
"time"
"github.com/cenkalti/rain/internal/logger"
"github.com/cenkalti/rain/internal/peerconn/peerreader"
"github.com/cenkalti/rain/internal/peerprotocol"
)
const keepAlivePeriod = 2 * time.Minute
// PeerWriter owns the outgoing side of a peer connection. It queues
// protocol messages, writes them to conn from a dedicated goroutine,
// and emits upload statistics on the messages channel.
type PeerWriter struct {
	conn       net.Conn                        // underlying peer connection
	queueC     chan peerprotocol.Message       // messages submitted via SendMessage/SendPiece
	cancelC    chan peerprotocol.CancelMessage // cancel requests for queued pieces
	writeQueue *list.List                      // FIFO of messages waiting to be written
	writeC     chan peerprotocol.Message       // feeds the messageWriter goroutine
	messages   chan interface{}                // outbound events (e.g. BlockUploaded)
	log        logger.Logger
	stopC      chan struct{} // closed by Stop to request shutdown
	doneC      chan struct{} // closed when Run returns
}
func New(conn net.Conn, l logger.Logger) *PeerWriter {
return &PeerWriter{
conn: conn,
queueC: make(chan peerprotocol.Message),
cancelC: make(chan peerprotocol.CancelMessage),
writeQueue: list.New(),
writeC: make(chan peerprotocol.Message),
messages: make(chan interface{}),
log: l,
stopC: make(chan struct{}),
doneC: make(chan struct{}),
}
}
func (p *PeerWriter) Messages() <-chan interface{} {
return p.messages
}
func (p *PeerWriter) SendMessage(msg peerprotocol.Message) {
select {
case p.queueC <- msg:
case <-p.doneC:
}
}
func (p *PeerWriter) SendPiece(msg peerprotocol.RequestMessage, pi io.ReaderAt) {
m := Piece{Piece: pi, Index: msg.Index, Begin: msg.Begin, Length: msg.Length}
select {
case p.queueC <- m:
case <-p.doneC:
}
}
func (p *PeerWriter) CancelRequest(msg peerprotocol.CancelMessage) {
select {
case p.cancelC <- msg:
case <-p.doneC:
}
}
func (p *PeerWriter) Stop() {
close(p.stopC)
}
func (p *PeerWriter) Done() chan struct{} {
return p.doneC
}
func (p *PeerWriter) Run() {
defer close(p.doneC)
go p.messageWriter()
for {
var (
e *list.Element
msg peerprotocol.Message
writeC chan peerprotocol.Message
)
if p.writeQueue.Len() > 0 {
e = p.writeQueue.Front()
msg = e.Value.(peerprotocol.Message)
writeC = p.writeC
}
select {
case msg = <-p.queueC:
p.queueMessage(msg)
case writeC <- msg:
p.writeQueue.Remove(e)
case cm := <-p.cancelC:
p.cancelRequest(cm)
case <-p.stopC:
return
}
}
}
func (p *PeerWriter) queueMessage(msg peerprotocol.Message) {
if _, ok := msg.(peerprotocol.ChokeMessage); ok {
p.cancelQueuedPieceMessages()
}
p.writeQueue.PushBack(msg)
}
func (p *PeerWriter) cancelQueuedPieceMessages() {
var next *list.Element
for e := p.writeQueue.Front(); e != nil; e = next {
next = e.Next()
if _, ok := e.Value.(Piece); ok {
p.writeQueue.Remove(e)
}
}
}
// cancelRequest removes the first queued Piece matching cm, if any.
func (p *PeerWriter) cancelRequest(cm peerprotocol.CancelMessage) {
	for e := p.writeQueue.Front(); e != nil; e = e.Next() {
		pi, isPiece := e.Value.(Piece)
		if !isPiece {
			continue
		}
		if pi.Index != cm.Index || pi.Begin != cm.Begin || pi.Length != cm.Length {
			continue
		}
		p.writeQueue.Remove(e)
		return
	}
}
func (p *PeerWriter) messageWriter() {
defer p.conn.Close()
// Disable write deadline that is previously set by handshaker.
err := p.conn.SetWriteDeadline(time.Time{})
if _, ok := err.(*net.OpError); ok {
p.log.Debugln("cannot set deadline:", err)
return
}
if err != nil {
p.log.Error(err)
return
}
keepAliveTicker := time.NewTicker(keepAlivePeriod / 2)
defer keepAliveTicker.Stop()
// Use a fixed-size array for slice storage.
// Length is calculated for a piece message at max block size.
// Length = 4 bytes length + 1 byte messageID + 8 bytes piece header + <MaxBlockSize> piece data
// This will reduce allocations in loop below.
var a [4 + 1 + 8 + peerreader.MaxBlockSize]byte
b := a[:0]
for {
select {
case msg := <-p.writeC:
// p.log.Debugf("writing message of type: %q", msg.ID())
buf := bytes.NewBuffer(b)
// Reserve space for length and message ID
buf.Write([]byte{0, 0, 0, 0, 0})
if wt, ok := msg.(io.WriterTo); ok {
_, err = wt.WriteTo(buf)
} else {
_, err = buf.ReadFrom(msg)
}
if err != nil {
p.log.Errorf("cannot serialize message [%v]: %s", msg.ID(), err.Error())
return
}
// Put length
binary.BigEndian.PutUint32(buf.Bytes()[:4], uint32(1+buf.Len()-5))
// Put message ID
buf.Bytes()[4] = uint8(msg.ID())
n, err := p.conn.Write(buf.Bytes())
if _, ok := msg.(Piece); ok {
p.countUploadBytes(n)
}
if _, ok := err.(*net.OpError); ok {
p.log.Debugf("cannot write message [%v]: %s", msg.ID(), err.Error())
return
}
if err != nil {
p.log.Errorf("cannot write message [%v]: %s", msg.ID(), err.Error())
return
}
case <-keepAliveTicker.C:
_, err := p.conn.Write([]byte{0, 0, 0, 0})
if _, ok := err.(*net.OpError); ok {
p.log.Debugf("cannot write keepalive message: %s", err.Error())
return
}
if err != nil {
p.log.Errorf("cannot write keepalive message: %s", err.Error())
return
}
case <-p.stopC:
return
}
}
}
// countUploadBytes reports the payload portion of a written piece
// message on the messages channel for upload accounting.
func (p *PeerWriter) countUploadBytes(n int) {
	// 13 bytes of frame overhead: 4 length + 1 id + 8 piece header.
	const headerLen = 13
	uploaded := n - headerLen
	if uploaded <= 0 {
		return
	}
	select {
	case p.messages <- BlockUploaded{Length: uint32(uploaded)}:
	case <-p.stopC:
	}
}
|
package tumblr
// Defines each subtype of Post (see consts below) and factory methods
// Post Types
const (
Text = "text"
Quote = "quote"
Link = "link"
Answer = "answer"
Video = "video"
Audio = "audio"
Photo = "photo"
Chat = "chat"
)
// Post holds the fields common to every entry in the
// "response":"posts" array.
type Post struct {
	BlogName    string
	Id          int64
	PostURL     string
	Type        string
	Timestamp   int64
	Date        string
	Format      string
	ReblogKey   string
	Tags        []string
	Bookmarklet bool
	Mobile      bool
	SourceURL   string
	SourceTitle string
	Liked       bool
	State       string // published, queued, draft, private
	TotalPosts  int64  // total posts in result set for pagination
}
type TextPost struct {
Post
Title string
Body string
}
// Photo post
type PhotoPost struct {
Post
Photos []PhotoData
Caption string
Width int64
Height int64
}
// One photo in a PhotoPost
type PhotoData struct {
Caption string // photosets only
AltSizes []AltSizeData
}
// One alternate size of a Photo
type AltSizeData struct {
Width int
Height int
URL string
}
// Quote post
type QuotePost struct {
Post
Text string
Source string
}
// Link post
type LinkPost struct {
Post
Title string
URL string
Description string
}
// Chat post
type ChatPost struct {
Post
Title string
Body string
Dialogue []DialogueData
}
// One component of a conversation in a Dialogue in a Chat
type DialogueData struct {
Name string
Label string
Phrase string
}
// Audio post
type AudioPost struct {
Post
Caption string
Player string
Plays int64
AlbumArt string
Artist string
Album string
TrackName string
TrackNumber int64
Year int
}
// Video post - TODO Handle all the different sources - not documented :(
type VideoPost struct {
Post
Caption string
Player []EmbedObjectData
}
// One embedded video player in a VideoPost
type EmbedObjectData struct {
Width int
EmbedCode string
}
// Answer post
type AnswerPost struct {
Post
AskingName string
AskingURL string
Question string
Answer string
}
Turn the post type constants into an enum-like PostType; import encoding/json.
package tumblr
// Defines each subtype of Post (see consts below) and factory methods
import (
"encoding/json"
)
// Post Types
// PostType enumerates the known Tumblr post types.
type PostType int

const (
	// Typed iota constants: without the explicit PostType the values
	// would be untyped ints and the PostType type would go unused.
	Text PostType = iota
	Quote
	Link
	Answer
	Video
	Audio
	Photo
	Chat
)
// Stuff in the "response":"posts" field
type Post struct {
BlogName string
Id int64
PostURL string
Type string
Timestamp int64
Date string
Format string
ReblogKey string
Tags []string
Bookmarklet bool
Mobile bool
SourceURL string
SourceTitle string
Liked bool
State string // published, ueued, draft, private
TotalPosts int64 // total posts in result set for pagination
}
type TextPost struct {
Post
Title string
Body string
}
// Photo post
type PhotoPost struct {
Post
Photos []PhotoData
Caption string
Width int64
Height int64
}
// One photo in a PhotoPost
type PhotoData struct {
Caption string // photosets only
AltSizes []AltSizeData
}
// One alternate size of a Photo
type AltSizeData struct {
Width int
Height int
URL string
}
// Quote post
type QuotePost struct {
Post
Text string
Source string
}
// Link post
type LinkPost struct {
Post
Title string
URL string
Description string
}
// Chat post
type ChatPost struct {
Post
Title string
Body string
Dialogue []DialogueData
}
// One component of a conversation in a Dialogue in a Chat
type DialogueData struct {
Name string
Label string
Phrase string
}
// Audio post
type AudioPost struct {
Post
Caption string
Player string
Plays int64
AlbumArt string
Artist string
Album string
TrackName string
TrackNumber int64
Year int
}
// Video post - TODO Handle all the different sources - not documented :(
type VideoPost struct {
Post
Caption string
Player []EmbedObjectData
}
// One embedded video player in a VideoPost
type EmbedObjectData struct {
Width int
EmbedCode string
}
// Answer post
type AnswerPost struct {
Post
AskingName string
AskingURL string
Question string
Answer string
}
|
package tumblr
// Defines each subtype of Post (see consts below) and factory methods
import (
"encoding/json"
)
// Post Types
type PostType int
const (
Unknown PostType = iota
Text
Quote
Link
Answer
Video
Audio
Photo
Chat
)
// Return the PostType of the type described in the JSON
// TypeOfPost maps the JSON "type" string to its PostType value,
// returning Unknown for unrecognized strings.
func TypeOfPost(t string) PostType {
	switch t {
	case "text":
		return Text
	case "quote":
		return Quote
	case "link":
		return Link
	case "answer":
		return Answer
	case "video":
		return Video
	case "audio":
		return Audio
	case "photo":
		return Photo
	case "chat":
		return Chat
	default:
		return Unknown
	}
}
type PostCollection struct {
Posts []Post // A conjunction of the below
TextPosts []TextPost
QuotePosts []QuotePost
LinkPosts []LinkPost
AnswerPosts []AnswerPost
VideoPosts []VideoPost
AudioPosts []AudioPost
PhotoPosts []PhotoPost
ChatPosts []ChatPost
}
// Constructs a PostCollection of typed Posts given the json.RawMessage
// of "response":"posts" which must be an array
// NewPostCollection constructs typed Posts from the json.RawMessage of
// "response":"posts", which must be a JSON array. Each element is
// decoded twice: once as PostBase to learn its type, then into the
// matching concrete struct. Posts of an unknown type are skipped.
// Unlike before, per-post decode errors are now returned instead of
// being silently ignored (which produced zero-valued posts).
func NewPostCollection(r *json.RawMessage) (*PostCollection, error) {
	rawPosts := []json.RawMessage{}
	err := json.Unmarshal(*r, &rawPosts)
	if err != nil {
		return nil, err
	}
	pc := &PostCollection{}
	// Append the post to the right field
	for _, rp := range rawPosts {
		// Extract most generic sections first
		var p PostBase
		if err = json.Unmarshal(rp, &p); err != nil {
			return nil, err
		}
		// Based on the type of the post, create a TypePost (sp = specific post)
		switch p.PostType() {
		case Text:
			var sp TextPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.TextPosts = append(pc.TextPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Quote:
			var sp QuotePost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.QuotePosts = append(pc.QuotePosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Link:
			var sp LinkPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.LinkPosts = append(pc.LinkPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Answer:
			var sp AnswerPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.AnswerPosts = append(pc.AnswerPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Video:
			var sp VideoPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.VideoPosts = append(pc.VideoPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Audio:
			var sp AudioPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.AudioPosts = append(pc.AudioPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Photo:
			var sp PhotoPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.PhotoPosts = append(pc.PhotoPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		case Chat:
			var sp ChatPost
			if err = json.Unmarshal(rp, &sp); err != nil {
				return nil, err
			}
			pc.ChatPosts = append(pc.ChatPosts, sp)
			pc.Posts = append(pc.Posts, &sp)
		}
	}
	return pc, nil
}
// Stuff in the "response":"posts" field
type PostBase struct {
BlogName string
Id int64
PostURL string
Type string
Timestamp int64
Date string
Format string
ReblogKey string
Tags []string
Bookmarklet bool
Mobile bool
SourceURL string
SourceTitle string
Liked bool
State string // published, ueued, draft, private
Notes []NoteData
TotalPosts int64 // total posts in result set for pagination
}
// Accessors for the common fields of a Post
type Post interface {
PostBlogName() string
PostId() int64
PostPostURL() string
PostTimestamp() int64
PostType() PostType
PostDate() string
PostFormat() string
PostReblogKey() string
PostTags() []string
PostBookmarklet() bool
PostMobile() bool
PostSourceURL() string
PostSourceTitle() string
PostLiked() bool
PostState() string // published, ueued, draft, private
PostNotes() []NoteData
PostTotalPosts() int64 // total posts in result set for pagination
}
func (p *PostBase) PostBlogName() string { return p.BlogName }
func (p *PostBase) PostId() int64 { return p.Id }
func (p *PostBase) PostPostURL() string { return p.PostURL }
func (p *PostBase) PostType() PostType { return TypeOfPost(p.Type) }
func (p *PostBase) PostTimestamp() int64 { return p.Timestamp }
func (p *PostBase) PostDate() string { return p.Date }
func (p *PostBase) PostFormat() string { return p.Format }
func (p *PostBase) PostReblogKey() string { return p.ReblogKey }
func (p *PostBase) PostTags() []string { return p.Tags }
func (p *PostBase) PostBookmarklet() bool { return p.Bookmarklet }
func (p *PostBase) PostMobile() bool { return p.Mobile }
func (p *PostBase) PostSourceURL() string { return p.SourceURL }
func (p *PostBase) PostSourceTitle() string { return p.SourceTitle }
func (p *PostBase) PostLiked() bool { return p.Liked }
func (p *PostBase) PostState() string { return p.State }
func (p *PostBase) PostNotes() []NoteData { return p.Notes }
func (p *PostBase) PostTotalPosts() int64 { return p.TotalPosts }
// Text post
type TextPost struct {
PostBase
Title string
Body string
}
// Photo post
type PhotoPost struct {
PostBase
Photos []PhotoData
Caption string
Width int64
Height int64
}
// One photo in a PhotoPost
type PhotoData struct {
Caption string // photosets only
AltSizes []AltSizeData
}
// One alternate size of a Photo
type AltSizeData struct {
Width int
Height int
URL string
}
// Quote post
type QuotePost struct {
PostBase
Text string
Source string
}
// Link post
type LinkPost struct {
PostBase
Title string
URL string
Description string
}
// Chat post
type ChatPost struct {
PostBase
Title string
Body string
Dialogue []DialogueData
}
// One component of a conversation in a Dialogue in a Chat
type DialogueData struct {
Name string
Label string
Phrase string
}
// Audio post
type AudioPost struct {
PostBase
Caption string
Player string
Plays int64
AlbumArt string
Artist string
Album string
TrackName string
TrackNumber int64
Year int
}
// Video post - TODO Handle all the different sources - not documented :(
type VideoPost struct {
PostBase
Caption string
Players []EmbedObjectData
}
// One embedded video player in a VideoPost
type EmbedObjectData struct {
Width int
EmbedCode string
}
// Answer post
type AnswerPost struct {
PostBase
AskingName string
AskingURL string
Question string
Answer string
}
// General notes information
// {
// "timestamp": "1401041794",
// "blog_name": "nalisification",
// "blog_url": "http://nalisification.tumblr.com/",
// "post_id": "1234",
// "type": "reblog"
// },
type NoteData struct {
Timestamp int64
BlogName string
BlogURL string
PostID int64
Type string
}
Tumblr now returns numeric note fields (timestamp, post_id) as JSON strings.
package tumblr
// Defines each subtype of Post (see consts below) and factory methods
import (
"encoding/json"
)
// Post Types
type PostType int
const (
Unknown PostType = iota
Text
Quote
Link
Answer
Video
Audio
Photo
Chat
)
// Return the PostType of the type described in the JSON
// TypeOfPost maps the JSON "type" string to its PostType value,
// returning Unknown for unrecognized strings.
func TypeOfPost(t string) PostType {
	switch t {
	case "text":
		return Text
	case "quote":
		return Quote
	case "link":
		return Link
	case "answer":
		return Answer
	case "video":
		return Video
	case "audio":
		return Audio
	case "photo":
		return Photo
	case "chat":
		return Chat
	default:
		return Unknown
	}
}
type PostCollection struct {
Posts []Post // A conjunction of the below
TextPosts []TextPost
QuotePosts []QuotePost
LinkPosts []LinkPost
AnswerPosts []AnswerPost
VideoPosts []VideoPost
AudioPosts []AudioPost
PhotoPosts []PhotoPost
ChatPosts []ChatPost
}
// Constructs a PostCollection of typed Posts given the json.RawMessage
// of "response":"posts" which must be an array
func NewPostCollection(r *json.RawMessage) (*PostCollection, error) {
rawPosts := []json.RawMessage{}
err := json.Unmarshal(*r, &rawPosts)
if err != nil {
return nil, err
}
pc := &PostCollection{}
// Append the post to the right field
for _, rp := range rawPosts {
// Extract most generic sections first
var p PostBase
err = json.Unmarshal(rp, &p)
if err != nil {
return nil, err
}
// Based on the type of the post, create a TypePost (sp = specific post)
switch p.PostType() {
case Text:
var sp TextPost
json.Unmarshal(rp, &sp)
pc.TextPosts = append(pc.TextPosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Quote:
var sp QuotePost
json.Unmarshal(rp, &sp)
pc.QuotePosts = append(pc.QuotePosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Link:
var sp LinkPost
json.Unmarshal(rp, &sp)
pc.LinkPosts = append(pc.LinkPosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Answer:
var sp AnswerPost
json.Unmarshal(rp, &sp)
pc.AnswerPosts = append(pc.AnswerPosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Video:
var sp VideoPost
json.Unmarshal(rp, &sp)
pc.VideoPosts = append(pc.VideoPosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Audio:
var sp AudioPost
json.Unmarshal(rp, &sp)
pc.AudioPosts = append(pc.AudioPosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Photo:
var sp PhotoPost
json.Unmarshal(rp, &sp)
pc.PhotoPosts = append(pc.PhotoPosts, sp)
pc.Posts = append(pc.Posts, &sp)
case Chat:
var sp ChatPost
json.Unmarshal(rp, &sp)
pc.ChatPosts = append(pc.ChatPosts, sp)
pc.Posts = append(pc.Posts, &sp)
}
}
return pc, nil
}
// PostBase holds the fields common to every entry in the
// "response":"posts" array; concrete post types embed it.
type PostBase struct {
	BlogName    string
	Id          int64
	PostURL     string
	Type        string
	Timestamp   int64
	Date        string
	Format      string
	ReblogKey   string
	Tags        []string
	Bookmarklet bool
	Mobile      bool
	SourceURL   string
	SourceTitle string
	Liked       bool
	State       string // published, queued, draft, private
	Notes       []NoteData
	TotalPosts  int64 // total posts in result set for pagination
}
// Accessors for the common fields of a Post
type Post interface {
PostBlogName() string
PostId() int64
PostPostURL() string
PostTimestamp() int64
PostType() PostType
PostDate() string
PostFormat() string
PostReblogKey() string
PostTags() []string
PostBookmarklet() bool
PostMobile() bool
PostSourceURL() string
PostSourceTitle() string
PostLiked() bool
PostState() string // published, ueued, draft, private
PostNotes() []NoteData
PostTotalPosts() int64 // total posts in result set for pagination
}
func (p *PostBase) PostBlogName() string { return p.BlogName }
func (p *PostBase) PostId() int64 { return p.Id }
func (p *PostBase) PostPostURL() string { return p.PostURL }
func (p *PostBase) PostType() PostType { return TypeOfPost(p.Type) }
func (p *PostBase) PostTimestamp() int64 { return p.Timestamp }
func (p *PostBase) PostDate() string { return p.Date }
func (p *PostBase) PostFormat() string { return p.Format }
func (p *PostBase) PostReblogKey() string { return p.ReblogKey }
func (p *PostBase) PostTags() []string { return p.Tags }
func (p *PostBase) PostBookmarklet() bool { return p.Bookmarklet }
func (p *PostBase) PostMobile() bool { return p.Mobile }
func (p *PostBase) PostSourceURL() string { return p.SourceURL }
func (p *PostBase) PostSourceTitle() string { return p.SourceTitle }
func (p *PostBase) PostLiked() bool { return p.Liked }
func (p *PostBase) PostState() string { return p.State }
func (p *PostBase) PostNotes() []NoteData { return p.Notes }
func (p *PostBase) PostTotalPosts() int64 { return p.TotalPosts }
// Text post
type TextPost struct {
PostBase
Title string
Body string
}
// Photo post
type PhotoPost struct {
PostBase
Photos []PhotoData
Caption string
Width int64
Height int64
}
// One photo in a PhotoPost
type PhotoData struct {
Caption string // photosets only
AltSizes []AltSizeData
}
// One alternate size of a Photo
type AltSizeData struct {
Width int
Height int
URL string
}
// Quote post
type QuotePost struct {
PostBase
Text string
Source string
}
// Link post
type LinkPost struct {
PostBase
Title string
URL string
Description string
}
// Chat post
type ChatPost struct {
PostBase
Title string
Body string
Dialogue []DialogueData
}
// One component of a conversation in a Dialogue in a Chat
type DialogueData struct {
Name string
Label string
Phrase string
}
// Audio post
type AudioPost struct {
PostBase
Caption string
Player string
Plays int64
AlbumArt string
Artist string
Album string
TrackName string
TrackNumber int64
Year int
}
// Video post - TODO Handle all the different sources - not documented :(
type VideoPost struct {
PostBase
Caption string
Players []EmbedObjectData
}
// One embedded video player in a VideoPost
type EmbedObjectData struct {
Width int
EmbedCode string
}
// Answer post
type AnswerPost struct {
PostBase
AskingName string
AskingURL string
Question string
Answer string
}
// General notes information
// {
// "timestamp": "1401041794",
// "blog_name": "nalisification",
// "blog_url": "http://nalisification.tumblr.com/",
// "post_id": "1234",
// "type": "reblog"
// },
// NoteData is one entry in a post's notes list (see sample above).
type NoteData struct {
	Timestamp string // numeric value, but the API delivers it as a JSON string
	BlogName  string
	BlogURL   string
	PostID    string // also a stringified number in the API response
	Type      string // reblog, like, post, ...?
}
|
/*
* Copyright 2015 Manish R Jain <manishrjain@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package posting
import (
"math/rand"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
"time"
"github.com/Sirupsen/logrus"
"github.com/dgraph-io/dgraph/commit"
"github.com/dgraph-io/dgraph/store"
"github.com/dgryski/go-farm"
"github.com/zond/gotomic"
)
type counters struct {
ticker *time.Ticker
added uint64
merged uint64
}
// periodicLog reports merge-progress counters on every ticker tick
// until the ticker is stopped. Counters are read atomically because
// worker goroutines update them concurrently.
func (c *counters) periodicLog() {
	// `for range` replaces the non-idiomatic `for _ = range`.
	for range c.ticker.C {
		mapSize := lhmap.Size()
		added := atomic.LoadUint64(&c.added)
		merged := atomic.LoadUint64(&c.merged)
		pending := added - merged
		glog.WithFields(logrus.Fields{
			"added":   added,
			"merged":  merged,
			"pending": pending,
			"mapsize": mapSize,
		}).Info("List Merge counters")
	}
}
var MAX_MEMORY uint64
var MIB uint64
func checkMemoryUsage() {
MIB = 1 << 20
MAX_MEMORY = 3 * (1 << 30)
for _ = range time.Tick(5 * time.Second) {
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
if ms.Alloc < MAX_MEMORY {
continue
}
// Okay, we exceed the max memory threshold.
// Stop the world, and deal with this first.
stopTheWorld.Lock()
megs := ms.Alloc / MIB
glog.WithField("allocated_MB", megs).
Info("Memory usage over threshold. STOPPED THE WORLD!")
glog.Info("Calling merge on all lists.")
MergeLists(100 * runtime.GOMAXPROCS(-1))
glog.Info("Merged lists. Calling GC.")
runtime.GC() // Call GC to do some cleanup.
glog.Info("Trying to free OS memory")
debug.FreeOSMemory()
runtime.ReadMemStats(&ms)
megs = ms.Alloc / MIB
glog.WithField("allocated_MB", megs).
Info("Memory Usage after calling GC.")
stopTheWorld.Unlock()
}
}
var stopTheWorld sync.RWMutex
var lhmap *gotomic.Hash
var pstore *store.Store
var clog *commit.Logger
func Init(posting *store.Store, log *commit.Logger) {
lhmap = gotomic.NewHash()
pstore = posting
clog = log
lc = new(lcounters)
go checkMemoryUsage()
}
// GetOrCreate returns the posting list for key, creating and
// registering a new one if absent. Holding stopTheWorld's read lock
// keeps the memory-pressure handler from merging lists underneath us.
func GetOrCreate(key []byte) *List {
	stopTheWorld.RLock()
	defer stopTheWorld.RUnlock()

	uid := farm.Fingerprint64(key)
	ukey := gotomic.IntKey(uid)
	if lp, _ := lhmap.Get(ukey); lp != nil {
		return lp.(*List)
	}

	l := NewList()
	l.init(key, pstore, clog)
	if inserted := lhmap.PutIfMissing(ukey, l); inserted {
		return l
	}
	// Lost the insertion race: another goroutine registered a list
	// first, so return that one instead. (Early returns replace the
	// else-after-return flagged by golint.)
	lp, _ := lhmap.Get(ukey)
	return lp.(*List)
}
// processOne removes k from the map and merges its posting list if
// dirty, bumping the merged counter.
func processOne(k gotomic.Hashable, c *counters) {
	ret, _ := lhmap.Delete(k)
	if ret == nil {
		// Key was already deleted (e.g. by a concurrent merge). The
		// type assertion below would panic on a nil interface value.
		return
	}
	l := ret.(*List)
	if l == nil {
		return
	}
	l.SetForDeletion() // No more AddMutation.
	if err := l.MergeIfDirty(); err != nil {
		glog.WithError(err).Error("While commiting dirty list.")
	}
	atomic.AddUint64(&c.merged, 1)
}
// For on-demand merging of all lists.
func process(ch chan gotomic.Hashable, c *counters, wg *sync.WaitGroup) {
for l := range ch {
processOne(l, c)
}
if wg != nil {
wg.Done()
}
}
func queueAll(ch chan gotomic.Hashable, c *counters) {
lhmap.Each(func(k gotomic.Hashable, v gotomic.Thing) bool {
ch <- k
atomic.AddUint64(&c.added, 1)
return false // If this returns true, Each would break.
})
close(ch)
}
func MergeLists(numRoutines int) {
ch := make(chan gotomic.Hashable, 10000)
c := new(counters)
c.ticker = time.NewTicker(time.Second)
go c.periodicLog()
go queueAll(ch, c)
wg := new(sync.WaitGroup)
for i := 0; i < numRoutines; i++ {
wg.Add(1)
go process(ch, c, wg)
}
wg.Wait()
c.ticker.Stop()
}
// For periodic merging of lists.
// queueRandomLists fills the spare capacity of ch with a pseudo-random
// sample of keys currently in lhmap, bumping the added counter per key.
func queueRandomLists(ch chan gotomic.Hashable, c *counters) {
	var buf []gotomic.Hashable
	var count int
	needed := cap(ch) - len(ch)
	if needed < 100 {
		// Not enough free slots to be worth a full map scan.
		return
	}
	// Generate a random list of
	// keys via reservoir-style sampling: keep the first `needed` keys,
	// then replace earlier picks with decreasing probability.
	// NOTE(review): rand.Intn(count) uses the pre-increment count, so
	// the sample is only approximately uniform — confirm this is
	// intentional.
	lhmap.Each(func(k gotomic.Hashable, v gotomic.Thing) bool {
		if count < needed {
			buf = append(buf, k)
		} else {
			j := rand.Intn(count)
			if j < len(buf) {
				buf[j] = k
			}
		}
		count += 1
		return false
	})
	for _, k := range buf {
		ch <- k
		atomic.AddUint64(&c.added, 1)
	}
}
func periodicQueueForProcessing(ch chan gotomic.Hashable, c *counters) {
ticker := time.NewTicker(time.Minute)
for _ = range ticker.C {
queueRandomLists(ch, c)
}
}
func periodicProcess(ch chan gotomic.Hashable, c *counters) {
ticker := time.NewTicker(100 * time.Millisecond)
for _ = range ticker.C {
hid := <-ch
processOne(hid, c)
}
}
func StartPeriodicMerging() {
ctr := new(counters)
ch := make(chan gotomic.Hashable, 10000)
go periodicQueueForProcessing(ch, ctr)
go periodicProcess(ch, ctr)
}
Allow setting the RAM usage threshold via a flag. Fix a bug with nil interface conversion in processOne.
/*
* Copyright 2015 Manish R Jain <manishrjain@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package posting
import (
"flag"
"math/rand"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
"time"
"github.com/Sirupsen/logrus"
"github.com/dgraph-io/dgraph/commit"
"github.com/dgraph-io/dgraph/store"
"github.com/dgryski/go-farm"
"github.com/zond/gotomic"
)
var maxmemory = flag.Uint64("threshold_ram_mb", 3072,
"If RAM usage exceeds this, we stop the world, and flush our buffers.")
type counters struct {
ticker *time.Ticker
added uint64
merged uint64
}
func (c *counters) periodicLog() {
for _ = range c.ticker.C {
mapSize := lhmap.Size()
added := atomic.LoadUint64(&c.added)
merged := atomic.LoadUint64(&c.merged)
pending := added - merged
glog.WithFields(logrus.Fields{
"added": added,
"merged": merged,
"pending": pending,
"mapsize": mapSize,
}).Info("List Merge counters")
}
}
var MAX_MEMORY uint64
var MIB uint64
func checkMemoryUsage() {
MIB = 1 << 20
MAX_MEMORY = *maxmemory * (1 << 20)
for _ = range time.Tick(5 * time.Second) {
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
if ms.Alloc < MAX_MEMORY {
continue
}
// Okay, we exceed the max memory threshold.
// Stop the world, and deal with this first.
stopTheWorld.Lock()
megs := ms.Alloc / MIB
glog.WithField("allocated_MB", megs).
Info("Memory usage over threshold. STOPPED THE WORLD!")
glog.Info("Calling merge on all lists.")
MergeLists(100 * runtime.GOMAXPROCS(-1))
glog.Info("Merged lists. Calling GC.")
runtime.GC() // Call GC to do some cleanup.
glog.Info("Trying to free OS memory")
debug.FreeOSMemory()
runtime.ReadMemStats(&ms)
megs = ms.Alloc / MIB
glog.WithField("allocated_MB", megs).
Info("Memory Usage after calling GC.")
stopTheWorld.Unlock()
}
}
var stopTheWorld sync.RWMutex
var lhmap *gotomic.Hash
var pstore *store.Store
var clog *commit.Logger
func Init(posting *store.Store, log *commit.Logger) {
lhmap = gotomic.NewHash()
pstore = posting
clog = log
lc = new(lcounters)
go checkMemoryUsage()
}
func GetOrCreate(key []byte) *List {
stopTheWorld.RLock()
defer stopTheWorld.RUnlock()
uid := farm.Fingerprint64(key)
ukey := gotomic.IntKey(uid)
lp, _ := lhmap.Get(ukey)
if lp != nil {
return lp.(*List)
}
l := NewList()
l.init(key, pstore, clog)
if inserted := lhmap.PutIfMissing(ukey, l); inserted {
return l
} else {
lp, _ = lhmap.Get(ukey)
return lp.(*List)
}
}
func processOne(k gotomic.Hashable, c *counters) {
ret, _ := lhmap.Delete(k)
if ret == nil {
return
}
l := ret.(*List)
if l == nil {
return
}
l.SetForDeletion() // No more AddMutation.
if err := l.MergeIfDirty(); err != nil {
glog.WithError(err).Error("While commiting dirty list.")
}
atomic.AddUint64(&c.merged, 1)
}
// For on-demand merging of all lists.
func process(ch chan gotomic.Hashable, c *counters, wg *sync.WaitGroup) {
for l := range ch {
processOne(l, c)
}
if wg != nil {
wg.Done()
}
}
func queueAll(ch chan gotomic.Hashable, c *counters) {
lhmap.Each(func(k gotomic.Hashable, v gotomic.Thing) bool {
ch <- k
atomic.AddUint64(&c.added, 1)
return false // If this returns true, Each would break.
})
close(ch)
}
func MergeLists(numRoutines int) {
ch := make(chan gotomic.Hashable, 10000)
c := new(counters)
c.ticker = time.NewTicker(time.Second)
go c.periodicLog()
go queueAll(ch, c)
wg := new(sync.WaitGroup)
for i := 0; i < numRoutines; i++ {
wg.Add(1)
go process(ch, c, wg)
}
wg.Wait()
c.ticker.Stop()
}
// For periodic merging of lists.
func queueRandomLists(ch chan gotomic.Hashable, c *counters) {
var buf []gotomic.Hashable
var count int
needed := cap(ch) - len(ch)
if needed < 100 {
return
}
// Generate a random list of
lhmap.Each(func(k gotomic.Hashable, v gotomic.Thing) bool {
if count < needed {
buf = append(buf, k)
} else {
j := rand.Intn(count)
if j < len(buf) {
buf[j] = k
}
}
count += 1
return false
})
for _, k := range buf {
ch <- k
atomic.AddUint64(&c.added, 1)
}
}
func periodicQueueForProcessing(ch chan gotomic.Hashable, c *counters) {
ticker := time.NewTicker(time.Minute)
for _ = range ticker.C {
queueRandomLists(ch, c)
}
}
func periodicProcess(ch chan gotomic.Hashable, c *counters) {
ticker := time.NewTicker(100 * time.Millisecond)
for _ = range ticker.C {
hid := <-ch
processOne(hid, c)
}
}
func StartPeriodicMerging() {
ctr := new(counters)
ch := make(chan gotomic.Hashable, 10000)
go periodicQueueForProcessing(ch, ctr)
go periodicProcess(ch, ctr)
}
|
package model
import (
"strings"
"testing"
"time"
"github.com/oinume/lekcije/server/config"
"github.com/stretchr/testify/assert"
)
// TestUpdateLessons verifies that UpdateLessons persists new lessons
// and that a subsequent status change is reflected by FindLessons.
func TestUpdateLessons(t *testing.T) {
	a := assert.New(t)
	lessons := make([]*Lesson, 5)
	datetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())
	now := time.Now()
	for i := range lessons {
		lessons[i] = &Lesson{
			TeacherId: 1,
			Datetime:  datetime.Add(time.Duration(i) * time.Hour),
			Status:    "Reserved",
			CreatedAt: now,
			UpdatedAt: now,
		}
	}
	affected, err := LessonService.UpdateLessons(lessons)
	a.NoError(err)
	a.Equal(int64(5), affected)

	foundLessons, err := LessonService.FindLessons(1, datetime, datetime)
	a.NoError(err)
	a.Equal(len(lessons), len(foundLessons))
	for i := range lessons {
		// TODO: custom enum type
		a.Equal(strings.ToLower(lessons[i].Status), strings.ToLower(foundLessons[i].Status))
	}

	// overwrite (TODO: split test)
	lessons[0].Status = "Available"
	affected, err = LessonService.UpdateLessons(lessons)
	a.NoError(err)
	//a.Equal(int64(1), affected)
	foundLessons, err = LessonService.FindLessons(1, datetime, datetime)
	a.NoError(err) // was previously unchecked
	a.Equal(strings.ToLower(foundLessons[0].Status), "available")
}
Split test
package model
import (
"strings"
"testing"
"time"
"github.com/oinume/lekcije/server/config"
"github.com/stretchr/testify/assert"
)
// TestUpdateLessons checks that freshly created lessons are persisted
// by UpdateLessons and can be read back via FindLessons with their
// status intact.
func TestUpdateLessons(t *testing.T) {
	a := assert.New(t)

	teacherId := uint32(1)
	datetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())
	lessons := createLessons(teacherId, datetime, "Reserved", 5)

	affected, err := LessonService.UpdateLessons(lessons)
	a.NoError(err)
	a.Equal(int64(5), affected)

	foundLessons, err := LessonService.FindLessons(teacherId, datetime, datetime)
	a.NoError(err)
	a.Equal(len(lessons), len(foundLessons))
	for i, lesson := range lessons {
		// TODO: custom enum type
		a.Equal(strings.ToLower(lesson.Status), strings.ToLower(foundLessons[i].Status))
	}
}
// TestUpdateLessonsOverwrite verifies that calling UpdateLessons a
// second time with a changed status overwrites the stored lesson.
func TestUpdateLessonsOverwrite(t *testing.T) {
	a := assert.New(t)
	datetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())
	lessons := createLessons(1, datetime, "Reserved", 5)
	lessons[0].Status = "Available"

	affected, err := LessonService.UpdateLessons(lessons)
	a.NoError(err)
	a.Equal(int64(2), affected) // Why 2????

	foundLessons, err := LessonService.FindLessons(1, datetime, datetime)
	a.NoError(err) // Fixed: this error was previously unchecked before indexing foundLessons.
	a.Equal(strings.ToLower(foundLessons[0].Status), "available")
}
// TestGetNewAvailableLessons currently only exercises
// GetNewAvailableLessons for panics; its return value is discarded and
// nothing is asserted. TODO(review): assert the result — presumably it
// should report exactly lessons2[0] as newly available — once the
// return contract is confirmed.
func TestGetNewAvailableLessons(t *testing.T) {
	//a := assert.New(t)
	datetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())
	// Two identical sets of reserved lessons; one lesson in the second
	// set flips to Available to simulate a newly opened slot.
	lessons1 := createLessons(1, datetime, "Reserved", 3)
	lessons2 := createLessons(1, datetime, "Reserved", 3)
	lessons2[0].Status = "Available"
	LessonService.GetNewAvailableLessons(lessons1, lessons2)
}
// createLessons builds `length` consecutive hourly lessons for the
// given teacher starting at baseDatetime, all sharing the same status.
func createLessons(teacherId uint32, baseDatetime time.Time, status string, length int) []*Lesson {
	now := time.Now()
	lessons := make([]*Lesson, 0, length)
	for i := 0; i < length; i++ {
		lessons = append(lessons, &Lesson{
			TeacherId: teacherId,
			Datetime:  baseDatetime.Add(time.Duration(i) * time.Hour),
			Status:    status,
			CreatedAt: now,
			UpdatedAt: now,
		})
	}
	return lessons
}
|
package main
import (
"fmt"
"github.com/luisvinicius167/godux"
)
// main demonstrates a minimal godux store: a "count" value mutated by
// INCREMENT/DECREMENT actions routed through a reducer.
func main() {
	// Creating new Store
	store := godux.NewStore()
	// Fixed: the godux method is SetState, not Setstate.
	store.SetState("count", 1)

	// Creating new Action
	increment := func(number int) godux.Action {
		return godux.Action{
			Type:  "INCREMENT",
			Value: number,
		}
	}
	decrement := func(number int) godux.Action {
		return godux.Action{
			Type:  "DECREMENT",
			Value: number,
		}
	}

	// reducer function
	reducer := func(action godux.Action) interface{} {
		switch action.Type {
		case "INCREMENT":
			return store.GetState("count").(int) + action.Value.(int)
		case "DECREMENT":
			return action.Value.(int) - store.GetState("count").(int)
		default:
			return store.GetAllState()
		}
	}

	// Add your reducer function to return new values based on your state
	store.Reducer(reducer)

	// Receive new value
	newCount := store.Dispatch(increment(10))
	subCount := store.Dispatch(decrement(10))

	// Fixed: the printed values are ints (as interface{}), so %s would
	// render them as %!s(int=...); %v prints them correctly.
	fmt.Printf("Your Store state is: %v. Your newCount is: %v. Your subCount is: %v", store.GetState("count"), newCount, subCount)
}
Update example.go
package main
import (
"fmt"
"github.com/luisvinicius167/godux"
)
// main demonstrates a minimal godux store: a "count" value mutated by
// INCREMENT/DECREMENT actions routed through a reducer.
func main() {
	// Creating new Store
	store := godux.NewStore()
	store.SetState("count", 1)

	// Creating new Action
	increment := func(number int) godux.Action {
		return godux.Action{
			Type:  "INCREMENT",
			Value: number,
		}
	}
	decrement := func(number int) godux.Action {
		return godux.Action{
			Type:  "DECREMENT",
			Value: number,
		}
	}

	// reducer function
	reducer := func(action godux.Action) interface{} {
		switch action.Type {
		case "INCREMENT":
			return store.GetState("count").(int) + action.Value.(int)
		case "DECREMENT":
			return action.Value.(int) - store.GetState("count").(int)
		default:
			return store.GetAllState()
		}
	}

	// Add your reducer function to return new values based on your state
	store.Reducer(reducer)

	// Receive new value
	newCount := store.Dispatch(increment(10))
	subCount := store.Dispatch(decrement(10))

	// Fixed: the printed values are ints (as interface{}), so %s would
	// render them as %!s(int=...); %v prints them correctly.
	fmt.Printf("Your Store state is: %v. Your newCount is: %v. Your subCount is: %v", store.GetState("count"), newCount, subCount)
}
|
// Package premailer is for inline styling.
//
// import (
// "fmt"
// "github.com/vanng822/go-premailer/premailer"
// "log"
// )
//
// func main() {
// prem := premailer.NewPremailerFromFile(inputFile, premailer.NewOptions())
// html, err := prem.Transform()
// if err != nil {
// log.Fatal(err)
// }
//
// fmt.Println(html)
// }
package premailer
Reformatting
// Package premailer is for inline styling.
//
// import (
// "fmt"
// "github.com/vanng822/go-premailer/premailer"
// "log"
// )
//
// func main() {
// prem := premailer.NewPremailerFromFile(inputFile, premailer.NewOptions())
// html, err := prem.Transform()
// if err != nil {
// log.Fatal(err)
// }
//
// fmt.Println(html)
// }
package premailer
|
package drivers
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/flosch/pongo2"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
liblxc "gopkg.in/lxc/go-lxc.v2"
yaml "gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cgroup"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/device"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/device/nictype"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/instance/operationlock"
"github.com/lxc/lxd/lxd/maas"
"github.com/lxc/lxd/lxd/network"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/seccomp"
"github.com/lxc/lxd/lxd/state"
storagePools "github.com/lxc/lxd/lxd/storage"
storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/template"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/instancewriter"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/logging"
"github.com/lxc/lxd/shared/netutils"
"github.com/lxc/lxd/shared/osarch"
"github.com/lxc/lxd/shared/units"
)
// Helper functions

// lxcSetConfigItem sets key=value on the given go-lxc container,
// translating modern (liblxc >= 2.1) key names to their pre-2.1
// spellings when running against an older liblxc.
func lxcSetConfigItem(c *liblxc.Container, key string, value string) error {
	if c == nil {
		return fmt.Errorf("Uninitialized go-lxc struct")
	}

	if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
		// Lookup table of modern key names to their legacy equivalents.
		legacyKeys := map[string]string{
			"lxc.uts.name":                  "lxc.utsname",
			"lxc.pty.max":                   "lxc.pts",
			"lxc.tty.dir":                   "lxc.devttydir",
			"lxc.tty.max":                   "lxc.tty",
			"lxc.apparmor.profile":          "lxc.aa_profile",
			"lxc.apparmor.allow_incomplete": "lxc.aa_allow_incomplete",
			"lxc.selinux.context":           "lxc.se_context",
			"lxc.mount.fstab":               "lxc.mount",
			"lxc.console.path":              "lxc.console",
			"lxc.seccomp.profile":           "lxc.seccomp",
			"lxc.signal.halt":               "lxc.haltsignal",
			"lxc.signal.reboot":             "lxc.rebootsignal",
			"lxc.signal.stop":               "lxc.stopsignal",
			"lxc.log.syslog":                "lxc.syslog",
			"lxc.log.level":                 "lxc.loglevel",
			"lxc.log.file":                  "lxc.logfile",
			"lxc.init.cmd":                  "lxc.init_cmd",
			"lxc.init.uid":                  "lxc.init_uid",
			"lxc.init.gid":                  "lxc.init_gid",
			"lxc.idmap":                     "lxc.id_map",
		}

		if legacy, ok := legacyKeys[key]; ok {
			key = legacy
		}
	}

	// prlimit keys only exist in liblxc 2.1 and newer.
	if strings.HasPrefix(key, "lxc.prlimit.") && !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
		return fmt.Errorf(`Process limits require liblxc >= 2.1`)
	}

	if err := c.SetConfigItem(key, value); err != nil {
		return fmt.Errorf("Failed to set LXC config: %s=%s", key, value)
	}

	return nil
}
// lxcStatusCode converts a liblxc container state into the matching
// LXD API status code. Unknown states yield the zero StatusCode,
// matching the previous map-lookup behaviour.
func lxcStatusCode(state liblxc.State) api.StatusCode {
	switch int(state) {
	case 1:
		return api.Stopped
	case 2:
		return api.Starting
	case 3:
		return api.Running
	case 4:
		return api.Stopping
	case 5:
		return api.Aborting
	case 6:
		return api.Freezing
	case 7:
		return api.Frozen
	case 8:
		return api.Thawed
	case 9:
		return api.Error
	}

	var unknown api.StatusCode
	return unknown
}
// Loader functions

// lxcCreate builds a new LXC container instance from database
// arguments: it expands and validates config/devices, creates the
// storage volume records, allocates an idmap, adds the configured
// devices and updates MAAS. On any error the partially-created
// instance is torn down again via the revert mechanism.
func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
	// Create the container struct
	d := &lxc{
		common: common{
			state:        s,
			architecture: args.Architecture,
			creationDate: args.CreationDate,
			dbType:       args.Type,
			description:  args.Description,
			ephemeral:    args.Ephemeral,
			expiryDate:   args.ExpiryDate,
			id:           args.ID,
			lastUsedDate: args.LastUsedDate,
			localConfig:  args.Config,
			localDevices: args.Devices,
			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type, "instance": args.Name, "project": args.Project}),
			name:         args.Name,
			node:         args.Node,
			profiles:     args.Profiles,
			project:      args.Project,
			snapshot:     args.Snapshot,
			stateful:     args.Stateful,
		},
	}

	revert := revert.New()
	defer revert.Fail()

	// Use d.Delete() in revert on error as this function doesn't just create DB records, it can also cause
	// other modifications to the host when devices are added.
	revert.Add(func() { d.Delete(true) })

	// Cleanup the zero values
	if d.expiryDate.IsZero() {
		d.expiryDate = time.Time{}
	}

	if d.creationDate.IsZero() {
		d.creationDate = time.Time{}
	}

	if d.lastUsedDate.IsZero() {
		d.lastUsedDate = time.Time{}
	}

	d.logger.Info("Creating container", log.Ctx{"ephemeral": d.ephemeral})

	// Load the config.
	err := d.init()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to expand config")
	}

	// Validate expanded config.
	err = instance.ValidConfig(s.OS, d.expandedConfig, false, true)
	if err != nil {
		return nil, errors.Wrap(err, "Invalid config")
	}

	err = instance.ValidDevices(s, s.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
	if err != nil {
		return nil, errors.Wrap(err, "Invalid devices")
	}

	// Retrieve the container's storage pool. For snapshots the pool is
	// taken from the parent instance's expanded devices.
	var storageInstance instance.Instance
	if d.IsSnapshot() {
		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)

		// Load the parent.
		storageInstance, err = instance.LoadByProjectAndName(d.state, d.project, parentName)
		if err != nil {
			return nil, errors.Wrap(err, "Invalid parent")
		}
	} else {
		storageInstance = d
	}

	_, rootDiskDevice, err := shared.GetRootDiskDevice(storageInstance.ExpandedDevices().CloneNative())
	if err != nil {
		return nil, err
	}

	if rootDiskDevice["pool"] == "" {
		return nil, fmt.Errorf("The container's root device is missing the pool property")
	}

	// Initialize the storage pool.
	d.storagePool, err = storagePools.GetPoolByName(d.state, rootDiskDevice["pool"])
	if err != nil {
		return nil, errors.Wrapf(err, "Failed loading storage pool")
	}

	// Create a new storage volume database entry for the container's storage volume.
	if d.IsSnapshot() {
		// Copy volume config from parent.
		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(args.Name)
		_, parentVol, err := s.Cluster.GetLocalStoragePoolVolume(args.Project, parentName, db.StoragePoolVolumeTypeContainer, d.storagePool.ID())
		if err != nil {
			return nil, errors.Wrapf(err, "Failed loading source volume for snapshot")
		}

		_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, d.storagePool.ID(), parentVol.Config, time.Time{})
		if err != nil {
			return nil, errors.Wrapf(err, "Failed creating storage record for snapshot")
		}
	} else {
		// Fill default config for new instances.
		volumeConfig := map[string]string{}
		err = d.storagePool.FillInstanceConfig(d, volumeConfig)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed filling default config")
		}

		_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, d.storagePool.ID(), volumeConfig, db.StoragePoolVolumeContentTypeFS)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed creating storage record")
		}
	}

	// Setup initial idmap config. Privileged containers run without an
	// idmap (idmap stays nil, base stays 0).
	var idmap *idmap.IdmapSet
	base := int64(0)
	if !d.IsPrivileged() {
		idmap, base, err = findIdmap(
			s,
			args.Name,
			d.expandedConfig["security.idmap.isolated"],
			d.expandedConfig["security.idmap.base"],
			d.expandedConfig["security.idmap.size"],
			d.expandedConfig["raw.idmap"],
		)

		if err != nil {
			return nil, err
		}
	}

	// Serialise the idmap (or an empty list) into the volatile config.
	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			return nil, err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}

	err = d.VolatileSet(map[string]string{"volatile.idmap.next": jsonIdmap})
	if err != nil {
		return nil, err
	}

	err = d.VolatileSet(map[string]string{"volatile.idmap.base": fmt.Sprintf("%v", base)})
	if err != nil {
		return nil, err
	}

	// Invalid idmap cache.
	d.idmapset = nil

	// Set last_state if not currently set.
	if d.localConfig["volatile.last_state.idmap"] == "" {
		err = d.VolatileSet(map[string]string{"volatile.last_state.idmap": "[]"})
		if err != nil {
			return nil, err
		}
	}

	// Re-run init to update the idmap.
	err = d.init()
	if err != nil {
		return nil, err
	}

	if !d.IsSnapshot() {
		// Add devices to container. Unsupported device types are
		// deliberately skipped rather than treated as fatal.
		for k, m := range d.expandedDevices {
			err = d.deviceAdd(k, m, false)
			if err != nil && err != device.ErrUnsupportedDevType {
				return nil, errors.Wrapf(err, "Failed to add device %q", k)
			}
		}

		// Update MAAS (must run after the MAC addresses have been generated).
		err = d.maasUpdate(nil)
		if err != nil {
			return nil, err
		}
	}

	d.logger.Info("Created container", log.Ctx{"ephemeral": d.ephemeral})
	d.lifecycle("created", nil)

	revert.Success()
	return d, nil
}
// lxcLoad instantiates a container from DB arguments, expands its
// config and devices against the given profiles, and registers a
// finalizer that releases the cached go-lxc handle on collection.
func lxcLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
	inst := lxcInstantiate(s, args, nil)

	// Release the liblxc handle when the struct is garbage collected.
	runtime.SetFinalizer(inst, lxcUnload)

	// Expand config and devices from the profiles.
	container := inst.(*lxc)
	if err := container.expandConfig(profiles); err != nil {
		return nil, err
	}
	if err := container.expandDevices(profiles); err != nil {
		return nil, err
	}

	return inst, nil
}
// Unload is called by the garbage collector: it clears the finalizer
// and releases the cached go-lxc handle, if one was ever created.
func lxcUnload(d *lxc) {
	runtime.SetFinalizer(d, nil)

	if handle := d.c; handle != nil {
		handle.Release()
		d.c = nil
	}
}
// Create a container struct without initializing it.
// lxcInstantiate fills in the common fields from the DB arguments,
// normalises zero-valued dates, and optionally seeds the expanded
// device list (used during expanded config validation). Unlike
// lxcLoad/lxcCreate, no config expansion or validation happens here.
func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) instance.Instance {
	d := &lxc{
		common: common{
			state:        s,
			architecture: args.Architecture,
			creationDate: args.CreationDate,
			dbType:       args.Type,
			description:  args.Description,
			ephemeral:    args.Ephemeral,
			expiryDate:   args.ExpiryDate,
			id:           args.ID,
			lastUsedDate: args.LastUsedDate,
			localConfig:  args.Config,
			localDevices: args.Devices,
			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type, "instance": args.Name, "project": args.Project}),
			name:         args.Name,
			node:         args.Node,
			profiles:     args.Profiles,
			project:      args.Project,
			snapshot:     args.Snapshot,
			stateful:     args.Stateful,
		},
	}

	// Cleanup the zero values
	if d.expiryDate.IsZero() {
		d.expiryDate = time.Time{}
	}

	if d.creationDate.IsZero() {
		d.creationDate = time.Time{}
	}

	if d.lastUsedDate.IsZero() {
		d.lastUsedDate = time.Time{}
	}

	// This is passed during expanded config validation.
	if expandedDevices != nil {
		d.expandedDevices = expandedDevices
	}

	return d
}
// The LXC container driver.
type lxc struct {
	common

	// Config handling.
	// fromHook is true when this code runs from inside a liblxc hook;
	// go-lxc must not be used in that context (see initLXC).
	fromHook bool

	// Cached handles.
	// Do not use these variables directly, instead use their associated get functions so they
	// will be initialised on demand.
	c           *liblxc.Container // cached go-lxc handle
	cConfig     bool              // whether c was loaded with the full config applied
	idmapset    *idmap.IdmapSet   // cached idmap; nil until (re)computed
	storagePool storagePools.Pool // pool backing this instance's root disk
}
// idmapSize determines the uid/gid map size for a container: the
// explicit numeric size when one is configured, 65536 for isolated
// containers, or the host's full map range otherwise.
func idmapSize(state *state.State, isolatedStr string, size string) (int64, error) {
	// Explicit numeric size wins over any automatic choice.
	if size != "" && size != "auto" {
		return strconv.ParseInt(size, 10, 64)
	}

	if shared.IsTrue(isolatedStr) {
		return 65536, nil
	}

	// Non-isolated: use the whole host range (expects one uid + one gid entry).
	if len(state.OS.IdmapSet.Idmap) != 2 {
		return 0, fmt.Errorf("bad initial idmap: %v", state.OS.IdmapSet)
	}

	return state.OS.IdmapSet.Idmap[0].Maprange, nil
}
// idmapLock serialises idmap allocation so two concurrently-created
// containers cannot be handed overlapping isolated ranges.
var idmapLock sync.Mutex

// findIdmap returns the idmap set and base host id to use for the
// container cName. Non-isolated containers share the host's idmap
// (with any raw.idmap entries applied); isolated containers get a
// dedicated range, either at the explicitly configured base or in the
// first free gap between the ranges already claimed by other isolated
// containers on this node.
func findIdmap(state *state.State, cName string, isolatedStr string, configBase string, configSize string, rawIdmap string) (*idmap.IdmapSet, int64, error) {
	isolated := false
	if shared.IsTrue(isolatedStr) {
		isolated = true
	}

	rawMaps, err := instance.ParseRawIdmap(rawIdmap)
	if err != nil {
		return nil, 0, err
	}

	// Non-isolated: clone the host map and overlay the raw entries.
	if !isolated {
		newIdmapset := idmap.IdmapSet{Idmap: make([]idmap.IdmapEntry, len(state.OS.IdmapSet.Idmap))}
		copy(newIdmapset.Idmap, state.OS.IdmapSet.Idmap)

		for _, ent := range rawMaps {
			// Only ErrHostIdIsSubId is fatal; other AddSafe errors are ignored here.
			err := newIdmapset.AddSafe(ent)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, 0, err
			}
		}

		return &newIdmapset, 0, nil
	}

	size, err := idmapSize(state, isolatedStr, configSize)
	if err != nil {
		return nil, 0, err
	}

	// mkIdmap builds a uid+gid map of `size` ids starting at host id
	// `offset`, with raw.idmap entries overlaid.
	mkIdmap := func(offset int64, size int64) (*idmap.IdmapSet, error) {
		set := &idmap.IdmapSet{Idmap: []idmap.IdmapEntry{
			{Isuid: true, Nsid: 0, Hostid: offset, Maprange: size},
			{Isgid: true, Nsid: 0, Hostid: offset, Maprange: size},
		}}

		for _, ent := range rawMaps {
			// Only ErrHostIdIsSubId is fatal; other AddSafe errors are ignored here.
			err := set.AddSafe(ent)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, err
			}
		}

		return set, nil
	}

	// An explicit base skips the allocation scan entirely.
	if configBase != "" {
		offset, err := strconv.ParseInt(configBase, 10, 64)
		if err != nil {
			return nil, 0, err
		}

		set, err := mkIdmap(offset, size)
		if err != nil && err == idmap.ErrHostIdIsSubId {
			return nil, 0, err
		}

		return set, offset, nil
	}

	idmapLock.Lock()
	defer idmapLock.Unlock()

	cts, err := instance.LoadNodeAll(state, instancetype.Container)
	if err != nil {
		return nil, 0, err
	}

	// Start scanning one 65536-block above the host map's base.
	offset := state.OS.IdmapSet.Idmap[0].Hostid + 65536

	// Collect the (base, size) ranges of every other isolated,
	// unprivileged container so we can find a free gap.
	mapentries := idmap.ByHostid{}
	for _, container := range cts {
		if container.Type() != instancetype.Container {
			continue
		}

		name := container.Name()

		/* Don't change our map Just Because. */
		if name == cName {
			continue
		}

		if container.IsPrivileged() {
			continue
		}

		if !shared.IsTrue(container.ExpandedConfig()["security.idmap.isolated"]) {
			continue
		}

		cBase := int64(0)
		if container.ExpandedConfig()["volatile.idmap.base"] != "" {
			cBase, err = strconv.ParseInt(container.ExpandedConfig()["volatile.idmap.base"], 10, 64)
			if err != nil {
				return nil, 0, err
			}
		}

		cSize, err := idmapSize(state, container.ExpandedConfig()["security.idmap.isolated"], container.ExpandedConfig()["security.idmap.size"])
		if err != nil {
			return nil, 0, err
		}

		mapentries = append(mapentries, &idmap.IdmapEntry{Hostid: int64(cBase), Maprange: cSize})
	}

	// Scan the sorted claimed ranges for the first gap of at least `size` ids.
	sort.Sort(mapentries)

	for i := range mapentries {
		if i == 0 {
			if mapentries[0].Hostid < offset+size {
				offset = mapentries[0].Hostid + mapentries[0].Maprange
				continue
			}

			set, err := mkIdmap(offset, size)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, 0, err
			}

			return set, offset, nil
		}

		if mapentries[i-1].Hostid+mapentries[i-1].Maprange > offset {
			offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
			continue
		}

		offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
		if offset+size < mapentries[i].Hostid {
			set, err := mkIdmap(offset, size)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, 0, err
			}

			return set, offset, nil
		}

		offset = mapentries[i].Hostid + mapentries[i].Maprange
	}

	// No gap between existing ranges; try the tail of the host map.
	if offset+size < state.OS.IdmapSet.Idmap[0].Hostid+state.OS.IdmapSet.Idmap[0].Maprange {
		set, err := mkIdmap(offset, size)
		if err != nil && err == idmap.ErrHostIdIsSubId {
			return nil, 0, err
		}

		return set, offset, nil
	}

	return nil, 0, fmt.Errorf("Not enough uid/gid available for the container")
}
// init computes the container's expanded config and expanded device
// list from its local config plus its profiles.
func (d *lxc) init() error {
	if err := d.expandConfig(nil); err != nil {
		return err
	}

	return d.expandDevices(nil)
}
func (d *lxc) initLXC(config bool) error {
// No need to go through all that for snapshots
if d.IsSnapshot() {
return nil
}
// Check if being called from a hook
if d.fromHook {
return fmt.Errorf("You can't use go-lxc from inside a LXC hook")
}
// Check if already initialized
if d.c != nil {
if !config || d.cConfig {
return nil
}
}
// Load the go-lxc struct
cname := project.Instance(d.Project(), d.Name())
cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
if err != nil {
return err
}
// Load cgroup abstraction
cg, err := d.cgroup(cc)
if err != nil {
return err
}
freeContainer := true
defer func() {
if freeContainer {
cc.Release()
}
}()
// Setup logging
logfile := d.LogFilePath()
err = lxcSetConfigItem(cc, "lxc.log.file", logfile)
if err != nil {
return err
}
logLevel := "warn"
if daemon.Debug {
logLevel = "trace"
} else if daemon.Verbose {
logLevel = "info"
}
err = lxcSetConfigItem(cc, "lxc.log.level", logLevel)
if err != nil {
return err
}
if util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
// Default size log buffer
err = lxcSetConfigItem(cc, "lxc.console.buffer.size", "auto")
if err != nil {
return err
}
err = lxcSetConfigItem(cc, "lxc.console.size", "auto")
if err != nil {
return err
}
// File to dump ringbuffer contents to when requested or
// container shutdown.
consoleBufferLogFile := d.ConsoleBufferLogPath()
err = lxcSetConfigItem(cc, "lxc.console.logfile", consoleBufferLogFile)
if err != nil {
return err
}
}
// Allow for lightweight init
d.cConfig = config
if !config {
if d.c != nil {
d.c.Release()
}
d.c = cc
freeContainer = false
return nil
}
if d.IsPrivileged() {
// Base config
toDrop := "sys_time sys_module sys_rawio"
if !d.state.OS.AppArmorStacking || d.state.OS.AppArmorStacked {
toDrop = toDrop + " mac_admin mac_override"
}
err = lxcSetConfigItem(cc, "lxc.cap.drop", toDrop)
if err != nil {
return err
}
}
// Set an appropriate /proc, /sys/ and /sys/fs/cgroup
mounts := []string{}
if d.IsPrivileged() && !d.state.OS.RunningInUserNS {
mounts = append(mounts, "proc:mixed")
mounts = append(mounts, "sys:mixed")
} else {
mounts = append(mounts, "proc:rw")
mounts = append(mounts, "sys:rw")
}
cgInfo := cgroup.GetInfo()
if cgInfo.Namespacing {
if cgInfo.Layout == cgroup.CgroupsUnified {
mounts = append(mounts, "cgroup:rw:force")
} else {
mounts = append(mounts, "cgroup:mixed")
}
} else {
mounts = append(mounts, "cgroup:mixed")
}
err = lxcSetConfigItem(cc, "lxc.mount.auto", strings.Join(mounts, " "))
if err != nil {
return err
}
err = lxcSetConfigItem(cc, "lxc.autodev", "1")
if err != nil {
return err
}
err = lxcSetConfigItem(cc, "lxc.pty.max", "1024")
if err != nil {
return err
}
bindMounts := []string{
"/dev/fuse",
"/dev/net/tun",
"/proc/sys/fs/binfmt_misc",
"/sys/firmware/efi/efivars",
"/sys/fs/fuse/connections",
"/sys/fs/pstore",
"/sys/kernel/config",
"/sys/kernel/debug",
"/sys/kernel/security",
"/sys/kernel/tracing",
}
if d.IsPrivileged() && !d.state.OS.RunningInUserNS {
err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0")
if err != nil {
return err
}
} else {
bindMounts = append(bindMounts, "/dev/mqueue")
}
for _, mnt := range bindMounts {
if !shared.PathExists(mnt) {
continue
}
if shared.IsDir(mnt) {
err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none rbind,create=dir,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
if err != nil {
return err
}
} else {
err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
if err != nil {
return err
}
}
}
// For lxcfs
templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
if templateConfDir == "" {
templateConfDir = "/usr/share/lxc/config"
}
if shared.PathExists(fmt.Sprintf("%s/common.conf.d/", templateConfDir)) {
err = lxcSetConfigItem(cc, "lxc.include", fmt.Sprintf("%s/common.conf.d/", templateConfDir))
if err != nil {
return err
}
}
// Configure devices cgroup
if d.IsPrivileged() && !d.state.OS.RunningInUserNS && d.state.OS.CGInfo.Supports(cgroup.Devices, cg) {
if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified {
err = lxcSetConfigItem(cc, "lxc.cgroup2.devices.deny", "a")
} else {
err = lxcSetConfigItem(cc, "lxc.cgroup.devices.deny", "a")
}
if err != nil {
return err
}
devices := []string{
"b *:* m", // Allow mknod of block devices
"c *:* m", // Allow mknod of char devices
"c 136:* rwm", // /dev/pts devices
"c 1:3 rwm", // /dev/null
"c 1:5 rwm", // /dev/zero
"c 1:7 rwm", // /dev/full
"c 1:8 rwm", // /dev/random
"c 1:9 rwm", // /dev/urandom
"c 5:0 rwm", // /dev/tty
"c 5:1 rwm", // /dev/console
"c 5:2 rwm", // /dev/ptmx
"c 10:229 rwm", // /dev/fuse
"c 10:200 rwm", // /dev/net/tun
}
for _, dev := range devices {
if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified {
err = lxcSetConfigItem(cc, "lxc.cgroup2.devices.allow", dev)
} else {
err = lxcSetConfigItem(cc, "lxc.cgroup.devices.allow", dev)
}
if err != nil {
return err
}
}
}
if d.IsNesting() {
/*
* mount extra /proc and /sys to work around kernel
* restrictions on remounting them when covered
*/
err = lxcSetConfigItem(cc, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional 0 0")
if err != nil {
return err
}
err = lxcSetConfigItem(cc, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional 0 0")
if err != nil {
return err
}
}
// Setup architecture
personality, err := osarch.ArchitecturePersonality(d.architecture)
if err != nil {
personality, err = osarch.ArchitecturePersonality(d.state.OS.Architectures[0])
if err != nil {
return err
}
}
err = lxcSetConfigItem(cc, "lxc.arch", personality)
if err != nil {
return err
}
// Setup the hooks
err = lxcSetConfigItem(cc, "lxc.hook.version", "1")
if err != nil {
return err
}
// Call the onstart hook on start.
err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %s %s start", os.Getpid(), shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
if err != nil {
return err
}
// Call the onstopns hook on stop but before namespaces are unmounted.
err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %s %s stopns", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
if err != nil {
return err
}
// Call the onstop hook on stop.
err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %s %s stop", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
if err != nil {
return err
}
// Setup the console
err = lxcSetConfigItem(cc, "lxc.tty.max", "0")
if err != nil {
return err
}
// Setup the hostname
err = lxcSetConfigItem(cc, "lxc.uts.name", d.Name())
if err != nil {
return err
}
// Setup devlxd
if d.expandedConfig["security.devlxd"] == "" || shared.IsTrue(d.expandedConfig["security.devlxd"]) {
err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd")))
if err != nil {
return err
}
}
// Setup AppArmor
if d.state.OS.AppArmorAvailable {
if d.state.OS.AppArmorConfined || !d.state.OS.AppArmorAdmin {
// If confined but otherwise able to use AppArmor, use our own profile
curProfile := util.AppArmorProfile()
curProfile = strings.TrimSuffix(curProfile, " (enforce)")
err := lxcSetConfigItem(cc, "lxc.apparmor.profile", curProfile)
if err != nil {
return err
}
} else {
// If not currently confined, use the container's profile
profile := apparmor.InstanceProfileName(d)
/* In the nesting case, we want to enable the inside
* LXD to load its profile. Unprivileged containers can
* load profiles, but privileged containers cannot, so
* let's not use a namespace so they can fall back to
* the old way of nesting, i.e. using the parent's
* profile.
*/
if d.state.OS.AppArmorStacking && !d.state.OS.AppArmorStacked {
profile = fmt.Sprintf("%s//&:%s:", profile, apparmor.InstanceNamespaceName(d))
}
err := lxcSetConfigItem(cc, "lxc.apparmor.profile", profile)
if err != nil {
return err
}
}
}
// Setup Seccomp if necessary
if seccomp.InstanceNeedsPolicy(d) {
err = lxcSetConfigItem(cc, "lxc.seccomp.profile", seccomp.ProfilePath(d))
if err != nil {
return err
}
// Setup notification socket
// System requirement errors are handled during policy generation instead of here
ok, err := seccomp.InstanceNeedsIntercept(d.state, d)
if err == nil && ok {
err = lxcSetConfigItem(cc, "lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
if err != nil {
return err
}
}
}
// Setup idmap
idmapset, err := d.NextIdmap()
if err != nil {
return err
}
if idmapset != nil {
lines := idmapset.ToLxcString()
for _, line := range lines {
err := lxcSetConfigItem(cc, "lxc.idmap", line)
if err != nil {
return err
}
}
}
// Setup environment
for k, v := range d.expandedConfig {
if strings.HasPrefix(k, "environment.") {
err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
if err != nil {
return err
}
}
}
// Setup NVIDIA runtime
if shared.IsTrue(d.expandedConfig["nvidia.runtime"]) {
hookDir := os.Getenv("LXD_LXC_HOOK")
if hookDir == "" {
hookDir = "/usr/share/lxc/hooks"
}
hookPath := filepath.Join(hookDir, "nvidia")
if !shared.PathExists(hookPath) {
return fmt.Errorf("The NVIDIA LXC hook couldn't be found")
}
_, err := exec.LookPath("nvidia-container-cli")
if err != nil {
return fmt.Errorf("The NVIDIA container tools couldn't be found")
}
err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_VISIBLE_DEVICES=none")
if err != nil {
return err
}
nvidiaDriver := d.expandedConfig["nvidia.driver.capabilities"]
if nvidiaDriver == "" {
err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_DRIVER_CAPABILITIES=compute,utility")
if err != nil {
return err
}
} else {
err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_DRIVER_CAPABILITIES=%s", nvidiaDriver))
if err != nil {
return err
}
}
nvidiaRequireCuda := d.expandedConfig["nvidia.require.cuda"]
if nvidiaRequireCuda == "" {
err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_CUDA=%s", nvidiaRequireCuda))
if err != nil {
return err
}
}
nvidiaRequireDriver := d.expandedConfig["nvidia.require.driver"]
if nvidiaRequireDriver == "" {
err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_DRIVER=%s", nvidiaRequireDriver))
if err != nil {
return err
}
}
err = lxcSetConfigItem(cc, "lxc.hook.mount", hookPath)
if err != nil {
return err
}
}
// Memory limits
if d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
memory := d.expandedConfig["limits.memory"]
memoryEnforce := d.expandedConfig["limits.memory.enforce"]
memorySwap := d.expandedConfig["limits.memory.swap"]
memorySwapPriority := d.expandedConfig["limits.memory.swap.priority"]
// Configure the memory limits
if memory != "" {
var valueInt int64
if strings.HasSuffix(memory, "%") {
percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
if err != nil {
return err
}
memoryTotal, err := shared.DeviceTotalMemory()
if err != nil {
return err
}
valueInt = int64((memoryTotal / 100) * percent)
} else {
valueInt, err = units.ParseByteSizeString(memory)
if err != nil {
return err
}
}
if memoryEnforce == "soft" {
err = cg.SetMemorySoftLimit(valueInt)
if err != nil {
return err
}
} else {
if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
err = cg.SetMemoryLimit(valueInt)
if err != nil {
return err
}
err = cg.SetMemorySwapLimit(0)
if err != nil {
return err
}
} else {
err = cg.SetMemoryLimit(valueInt)
if err != nil {
return err
}
}
// Set soft limit to value 10% less than hard limit
err = cg.SetMemorySoftLimit(int64(float64(valueInt) * 0.9))
if err != nil {
return err
}
}
}
if d.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
// Configure the swappiness
if memorySwap != "" && !shared.IsTrue(memorySwap) {
err = cg.SetMemorySwappiness(0)
if err != nil {
return err
}
} else if memorySwapPriority != "" {
priority, err := strconv.Atoi(memorySwapPriority)
if err != nil {
return err
}
err = cg.SetMemorySwappiness(int64(60 - 10 + priority))
if err != nil {
return err
}
}
}
}
// CPU limits
cpuPriority := d.expandedConfig["limits.cpu.priority"]
cpuAllowance := d.expandedConfig["limits.cpu.allowance"]
if (cpuPriority != "" || cpuAllowance != "") && d.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(cpuAllowance, cpuPriority)
if err != nil {
return err
}
if cpuShares != 1024 {
err = cg.SetCPUShare(cpuShares)
if err != nil {
return err
}
}
if cpuCfsPeriod != -1 && cpuCfsQuota != -1 {
err = cg.SetCPUCfsLimit(cpuCfsPeriod, cpuCfsQuota)
if err != nil {
return err
}
}
}
// Disk priority limits.
diskPriority := d.ExpandedConfig()["limits.disk.priority"]
if diskPriority != "" {
if d.state.OS.CGInfo.Supports(cgroup.BlkioWeight, nil) {
priorityInt, err := strconv.Atoi(diskPriority)
if err != nil {
return err
}
priority := priorityInt * 100
// Minimum valid value is 10
if priority == 0 {
priority = 10
}
err = cg.SetBlkioWeight(int64(priority))
if err != nil {
return err
}
} else {
return fmt.Errorf("Cannot apply limits.disk.priority as blkio.weight cgroup controller is missing")
}
}
// Processes
if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
processes := d.expandedConfig["limits.processes"]
if processes != "" {
valueInt, err := strconv.ParseInt(processes, 10, 64)
if err != nil {
return err
}
err = cg.SetMaxProcesses(valueInt)
if err != nil {
return err
}
}
}
// Hugepages
if d.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
for i, key := range shared.HugePageSizeKeys {
value := d.expandedConfig[key]
if value != "" {
value, err := units.ParseByteSizeString(value)
if err != nil {
return err
}
err = cg.SetHugepagesLimit(shared.HugePageSizeSuffix[i], value)
if err != nil {
return err
}
}
}
}
// Setup process limits
for k, v := range d.expandedConfig {
if strings.HasPrefix(k, "limits.kernel.") {
prlimitSuffix := strings.TrimPrefix(k, "limits.kernel.")
prlimitKey := fmt.Sprintf("lxc.prlimit.%s", prlimitSuffix)
err = lxcSetConfigItem(cc, prlimitKey, v)
if err != nil {
return err
}
}
}
// Setup shmounts
if d.state.OS.LXCFeatures["mount_injection_file"] {
err = lxcSetConfigItem(cc, "lxc.mount.auto", fmt.Sprintf("shmounts:%s:/dev/.lxd-mounts", d.ShmountsPath()))
} else {
err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", d.ShmountsPath()))
}
if err != nil {
return err
}
// Apply raw.lxc
if lxcConfig, ok := d.expandedConfig["raw.lxc"]; ok {
f, err := ioutil.TempFile("", "lxd_config_")
if err != nil {
return err
}
err = shared.WriteAll(f, []byte(lxcConfig))
f.Close()
defer os.Remove(f.Name())
if err != nil {
return err
}
if err := cc.LoadConfigFile(f.Name()); err != nil {
return fmt.Errorf("Failed to load raw.lxc")
}
}
if d.c != nil {
d.c.Release()
}
d.c = cc
freeContainer = false
return nil
}
// devlxdEventSend forwards an event to the instance's /dev/lxd event stream.
// eventType is the event category and eventMessage is the JSON-serialisable
// payload; the events server builds the envelope (type/timestamp/metadata)
// itself from these values.
func (d *lxc) devlxdEventSend(eventType string, eventMessage interface{}) error {
	// Note: a shared.Jmap envelope used to be constructed here but was never
	// passed anywhere (dead code); the raw type and payload are what Send takes.
	return d.state.DevlxdEvents.Send(strconv.Itoa(d.ID()), eventType, eventMessage)
}
// RegisterDevices calls the Register() function on all of the instance's devices.
// Failures are logged per-device and never abort the overall registration pass.
func (d *lxc) RegisterDevices() {
	for _, entry := range d.ExpandedDevices().Sorted() {
		dev, _, err := d.deviceLoad(entry.Name, entry.Config)
		switch {
		case err == device.ErrUnsupportedDevType:
			// Unsupported device types have nothing to register.
			continue
		case err != nil:
			d.logger.Error("Failed to load device to register", log.Ctx{"err": err, "device": entry.Name})
			continue
		}

		// Give the device a chance to subscribe to any events it cares about.
		if err := dev.Register(); err != nil {
			d.logger.Error("Failed to register device", log.Ctx{"err": err, "device": entry.Name})
			continue
		}
	}
}
// deviceLoad instantiates and validates a new device and returns it along with enriched config.
func (d *lxc) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
	var configCopy deviceConfig.Device

	// NIC and infiniband devices have volatile fields merged into their config;
	// every other type simply gets an isolated copy so the device implementation
	// cannot mutate the caller's map.
	if shared.StringInSlice(rawConfig["type"], []string{"nic", "infiniband"}) {
		enriched, err := d.FillNetworkDevice(deviceName, rawConfig)
		if err != nil {
			return nil, nil, err
		}

		configCopy = enriched
	} else {
		configCopy = rawConfig.Clone()
	}

	dev, err := device.New(d, d.state, deviceName, configCopy, d.deviceVolatileGetFunc(deviceName), d.deviceVolatileSetFunc(deviceName))

	// Return device and config copy even if error occurs as caller may still use device.
	return dev, configCopy, err
}
// deviceAdd loads a new device and calls its Add() function.
func (d *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
	dev, _, err := d.deviceLoad(deviceName, rawConfig)
	if err != nil {
		return err
	}

	// Devices that cannot hotplug may only be added while the instance is down.
	if instanceRunning && !dev.CanHotPlug() {
		return fmt.Errorf("Device cannot be added when instance is running")
	}

	return dev.Add()
}
// deviceStart loads a new device and calls its Start() function.
//
// When instanceRunning is true the device is additionally live-attached:
// mounts, cgroup rules and network interfaces from the returned RunConfig are
// applied to the running container and any post-start hooks are executed
// immediately. When the instance is not running, the RunConfig is returned
// for the caller to apply during container start.
func (d *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
	logger := logging.AddContext(d.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"]})
	logger.Debug("Starting device")

	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
	if err != nil {
		return nil, err
	}

	if instanceRunning && !dev.CanHotPlug() {
		return nil, fmt.Errorf("Device cannot be started when instance is running")
	}

	runConf, err := dev.Start()
	if err != nil {
		return nil, err
	}

	// If runConf supplied, perform any container specific setup of device.
	if runConf != nil {
		// Shift device file ownership if needed before mounting into container.
		// This needs to be done whether or not container is running.
		if len(runConf.Mounts) > 0 {
			err := d.deviceStaticShiftMounts(runConf.Mounts)
			if err != nil {
				return nil, err
			}
		}

		// If container is running and then live attach device.
		if instanceRunning {
			// Attach mounts if requested.
			if len(runConf.Mounts) > 0 {
				err = d.deviceHandleMounts(runConf.Mounts)
				if err != nil {
					return nil, err
				}
			}

			// Add cgroup rules if requested.
			if len(runConf.CGroups) > 0 {
				err = d.deviceAddCgroupRules(runConf.CGroups)
				if err != nil {
					return nil, err
				}
			}

			// Attach network interface if requested.
			if len(runConf.NetworkInterface) > 0 {
				err = d.deviceAttachNIC(configCopy, runConf.NetworkInterface)
				if err != nil {
					return nil, err
				}
			}

			// If running, run post start hooks now (if not running LXD will run them
			// once the instance is started).
			err = d.runHooks(runConf.PostHooks)
			if err != nil {
				return nil, err
			}
		}
	}

	return runConf, nil
}
// deviceStaticShiftMounts statically shift device mount files ownership to active idmap if needed.
func (d *lxc) deviceStaticShiftMounts(mounts []deviceConfig.MountEntryItem) error {
	idmapSet, err := d.CurrentIdmap()
	if err != nil {
		return fmt.Errorf("Failed to get idmap for device: %s", err)
	}

	// Nothing to shift when no idmap applies, or when LXD itself is running
	// inside a user namespace (it cannot chown host files in that case).
	if idmapSet == nil || d.state.OS.RunningInUserNS {
		return nil
	}

	for _, mount := range mounts {
		// Only statically-shifted mounts with a host-side source path need
		// work; an empty DevPath denotes an unmount request.
		if mount.OwnerShift != deviceConfig.MountOwnerShiftStatic || mount.DevPath == "" {
			continue
		}

		if err := idmapSet.ShiftFile(mount.DevPath); err != nil {
			// uidshift failing is weird, but not a big problem. Log and proceed.
			d.logger.Debug("Failed to uidshift device", log.Ctx{"mountDevPath": mount.DevPath, "err": err})
		}
	}

	return nil
}
// deviceAddCgroupRules live adds cgroup rules to a container.
// Device ("devices.") rules are silently skipped when they cannot apply:
// unprivileged container, LXD running in a user namespace, or the host has
// no devices cgroup controller.
func (d *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error {
	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	for _, rule := range cgroups {
		// Only apply devices cgroup rules if container is running privileged and host has devices cgroup controller.
		if strings.HasPrefix(rule.Key, "devices.") && (!d.isCurrentlyPrivileged() || d.state.OS.RunningInUserNS || !d.state.OS.CGInfo.Supports(cgroup.Devices, cg)) {
			continue
		}

		// Add the new device cgroup rule.
		err := d.CGroupSet(rule.Key, rule.Value)
		if err != nil {
			// Include the underlying cause; it was previously dropped, making
			// failures here impossible to diagnose.
			return fmt.Errorf("Failed to add cgroup rule for device: %v", err)
		}
	}

	return nil
}
// deviceAttachNIC live attaches a NIC device to a container.
func (d *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem) error {
	// The device must tell us which host-side interface to move in via "link".
	var devName string
	for _, item := range netIF {
		if item.Key == "link" {
			devName = item.Value
			break
		}
	}

	if devName == "" {
		return fmt.Errorf("Device didn't provide a link property to use")
	}

	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return err
	}

	// Move the interface into the container under its configured name.
	if err := d.c.AttachInterface(devName, configCopy["name"]); err != nil {
		return fmt.Errorf("Failed to attach interface: %s to %s: %s", devName, configCopy["name"], err)
	}

	return nil
}
// deviceUpdate loads a new device and calls its Update() function.
// The old device set is handed to the device so it can diff and apply changes.
func (d *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
	dev, _, err := d.deviceLoad(deviceName, rawConfig)
	if err != nil {
		return err
	}

	return dev.Update(oldDevices, instanceRunning)
}
// deviceStop loads a new device and calls its Stop() function.
// Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
// container's network namespace is unmounted (which is required for NIC device cleanup).
//
// Validation failures from deviceLoad are deliberately tolerated (logged, not
// fatal) when a device instance was still returned, so devices created under
// older, laxer validation rules can always be stopped.
func (d *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool, stopHookNetnsPath string) error {
	logger := logging.AddContext(d.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"]})
	logger.Debug("Stopping device")

	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)

	// If deviceLoad fails with unsupported device type then return.
	if err == device.ErrUnsupportedDevType {
		return err
	}

	// If deviceLoad fails for any other reason then just log the error and proceed, as in the
	// scenario that a new version of LXD has additional validation restrictions than older
	// versions we still need to allow previously valid devices to be stopped.
	if err != nil {
		// If there is no device returned, then we cannot proceed, so return as error.
		if dev == nil {
			return fmt.Errorf("Device stop validation failed for %q: %v", deviceName, err)
		}

		logger.Error("Device stop validation failed for", log.Ctx{"err": err})
	}

	if instanceRunning && !dev.CanHotPlug() {
		return fmt.Errorf("Device cannot be stopped when instance is running")
	}

	runConf, err := dev.Stop()
	if err != nil {
		return err
	}

	// A nil runConf means the device has no live teardown to perform.
	if runConf != nil {
		// If network interface settings returned, then detach NIC from container.
		if len(runConf.NetworkInterface) > 0 {
			err = d.deviceDetachNIC(configCopy, runConf.NetworkInterface, instanceRunning, stopHookNetnsPath)
			if err != nil {
				return err
			}
		}

		// Add cgroup rules if requested and container is running.
		if len(runConf.CGroups) > 0 && instanceRunning {
			err = d.deviceAddCgroupRules(runConf.CGroups)
			if err != nil {
				return err
			}
		}

		// Detach mounts if requested and container is running.
		if len(runConf.Mounts) > 0 && instanceRunning {
			err = d.deviceHandleMounts(runConf.Mounts)
			if err != nil {
				return err
			}
		}

		// Run post stop hooks irrespective of run state of instance.
		err = d.runHooks(runConf.PostHooks)
		if err != nil {
			return err
		}
	}

	return nil
}
// deviceDetachNIC detaches a NIC device from a container.
// Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
// container's network namespace is unmounted (which is required for NIC device cleanup).
func (d *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem, instanceRunning bool, stopHookNetnsPath string) error {
	// Get requested device name to detach interface back to on the host.
	devName := ""
	for _, dev := range netIF {
		if dev.Key == "link" {
			devName = dev.Value
			break
		}
	}

	if devName == "" {
		return fmt.Errorf("Device didn't provide a link property to use")
	}

	// If container is running, perform live detach of interface back to host.
	if instanceRunning {
		// For some reason, having network config confuses detach, so get our own go-lxc struct.
		cname := project.Instance(d.Project(), d.Name())
		cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
		if err != nil {
			return err
		}
		defer cc.Release()

		// Get interfaces inside container.
		ifaces, err := cc.Interfaces()
		if err != nil {
			return fmt.Errorf("Failed to list network interfaces: %v", err)
		}

		// If interface doesn't exist inside container, cannot proceed.
		// Treated as success: there is nothing left to detach.
		if !shared.StringInSlice(configCopy["name"], ifaces) {
			return nil
		}

		err = cc.DetachInterfaceRename(configCopy["name"], devName)
		if err != nil {
			return errors.Wrapf(err, "Failed to detach interface: %q to %q", configCopy["name"], devName)
		}
	} else {
		// Currently liblxc does not move devices back to the host on stop that were added
		// after the container was started. For this reason we utilise the lxc.hook.stop
		// hook so that we can capture the netns path, enter the namespace and move the nics
		// back to the host and rename them if liblxc hasn't already done it.
		// We can only move back devices that have an expected host_name record and where
		// that device doesn't already exist on the host as if a device exists on the host
		// we can't know whether that is because liblxc has moved it back already or whether
		// it is a conflicting device.
		if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", devName)) {
			// Without the netns path we cannot enter the (already stopped)
			// container's namespace to retrieve the interface.
			if stopHookNetnsPath == "" {
				return fmt.Errorf("Cannot detach NIC device %q without stopHookNetnsPath being provided", devName)
			}

			err := d.detachInterfaceRename(stopHookNetnsPath, configCopy["name"], devName)
			if err != nil {
				return errors.Wrapf(err, "Failed to detach interface: %q to %q", configCopy["name"], devName)
			}

			d.logger.Debug("Detached NIC device interface", log.Ctx{"name": configCopy["name"], "devName": devName})
		}
	}

	return nil
}
// deviceHandleMounts live attaches or detaches mounts on a container.
// If the mount DevPath is empty the mount action is treated as unmount.
func (d *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
	for _, mount := range mounts {
		if mount.DevPath == "" {
			// Unmount request: only act if the target exists inside the container.
			relativeTargetPath := strings.TrimPrefix(mount.TargetPath, "/")
			if d.FileExists(relativeTargetPath) != nil {
				continue
			}

			if err := d.removeMount(mount.TargetPath); err != nil {
				return fmt.Errorf("Error unmounting the device path inside container: %s", err)
			}

			if err := d.FileRemove(relativeTargetPath); err != nil {
				// Only warn here and don't fail as removing a directory
				// mount may fail if there was already files inside
				// directory before it was mounted over preventing delete.
				d.logger.Warn("Could not remove the device path inside container", log.Ctx{"err": err})
			}

			continue
		}

		// Mount request: translate mount options into syscall flags.
		flags := 0
		for _, opt := range mount.Opts {
			switch opt {
			case "bind":
				flags |= unix.MS_BIND
			case "rbind":
				flags |= unix.MS_BIND | unix.MS_REC
			}
		}

		shiftfs := mount.OwnerShift == deviceConfig.MountOwnerShiftDynamic

		// Mount it into the container.
		if err := d.insertMount(mount.DevPath, mount.TargetPath, mount.FSType, flags, shiftfs); err != nil {
			return fmt.Errorf("Failed to add mount for device inside container: %s", err)
		}
	}

	return nil
}
// deviceRemove loads a new device and calls its Remove() function.
func (d *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
	logger := logging.AddContext(d.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"]})

	dev, _, err := d.deviceLoad(deviceName, rawConfig)

	// Unsupported device types cannot be handled at all, so bail out.
	if err == device.ErrUnsupportedDevType {
		return err
	}

	// Other validation failures are tolerated where possible: a newer LXD may
	// apply stricter validation than the version that created the device, yet
	// such devices still need to be removable. Proceed with a logged error as
	// long as a device instance was returned.
	if err != nil {
		if dev == nil {
			return fmt.Errorf("Device remove validation failed for %q: %v", deviceName, err)
		}

		logger.Error("Device remove validation failed", log.Ctx{"err": err})
	}

	if instanceRunning && !dev.CanHotPlug() {
		return fmt.Errorf("Device cannot be removed when instance is running")
	}

	return dev.Remove()
}
// deviceResetVolatile resets a device's volatile data when its removed or updated in such a way
// that it is removed then added immediately afterwards.
func (d *lxc) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
	devicePrefix := fmt.Sprintf("volatile.%s.", devName)

	newNICType, err := nictype.NICType(d.state, d.Project(), newConfig)
	if err != nil {
		return err
	}

	oldNICType, err := nictype.NICType(d.state, d.Project(), oldConfig)
	if err != nil {
		return err
	}

	// A device counts as "replaced" when its type changes (including the case
	// of newConfig being empty, i.e. an actual removal) or its NIC type changes
	// while keeping the same name; in that case every volatile key is wiped.
	typeChanged := newConfig["type"] != oldConfig["type"] || newNICType != oldNICType

	volatileClear := make(map[string]string)
	for k := range d.localConfig {
		if !strings.HasPrefix(k, devicePrefix) {
			continue
		}

		if typeChanged {
			volatileClear[k] = ""
			continue
		}

		// Same device type: clear only volatile keys whose name is explicitly
		// present in the new config (the new value supersedes the volatile one).
		devKey := strings.TrimPrefix(k, devicePrefix)
		if _, found := newConfig[devKey]; found {
			volatileClear[k] = ""
		}
	}

	return d.VolatileSet(volatileClear)
}
// DeviceEventHandler actions the results of a RunConfig after an event has occurred on a device.
// It applies mounts (with static ownership shifting), cgroup rules and post
// hooks, then injects any requested uevents into the container via the
// forkuevent helper.
func (d *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
	// Device events can only be processed when the container is running.
	if !d.IsRunning() {
		return nil
	}

	if runConf == nil {
		return nil
	}

	// Shift device file ownership if needed before mounting devices into container.
	if len(runConf.Mounts) > 0 {
		err := d.deviceStaticShiftMounts(runConf.Mounts)
		if err != nil {
			return err
		}

		err = d.deviceHandleMounts(runConf.Mounts)
		if err != nil {
			return err
		}
	}

	// Add cgroup rules if requested.
	if len(runConf.CGroups) > 0 {
		err := d.deviceAddCgroupRules(runConf.CGroups)
		if err != nil {
			return err
		}
	}

	// Run any post hooks requested.
	err := d.runHooks(runConf.PostHooks)
	if err != nil {
		return err
	}

	// Generate uevent inside container if requested.
	if len(runConf.Uevents) > 0 {
		pidFdNr, pidFd := d.inheritInitPidFd()
		if pidFdNr >= 0 {
			defer pidFd.Close()
		}

		for _, eventParts := range runConf.Uevents {
			// Build the argv for the helper: "forkuevent -- inject <initPID> <pidFd> <len>"
			// followed by the event parts themselves (note the assignments below
			// are intentionally indexed, not appended, so order differs from
			// source order of the statements).
			ueventArray := make([]string, 6)
			ueventArray[0] = "forkuevent"
			ueventArray[2] = "inject"
			ueventArray[1] = "--"
			ueventArray[3] = fmt.Sprintf("%d", d.InitPID())
			ueventArray[4] = fmt.Sprintf("%d", pidFdNr)
			// Total payload length: each part plus one byte per part
			// (presumably a separator/NUL expected by forkuevent — TODO confirm).
			length := 0
			for _, part := range eventParts {
				length = length + len(part) + 1
			}
			ueventArray[5] = fmt.Sprintf("%d", length)
			ueventArray = append(ueventArray, eventParts...)
			// pidFd is passed as an inherited file so the helper can use it.
			_, _, err := shared.RunCommandSplit(nil, []*os.File{pidFd}, d.state.OS.ExecPath, ueventArray...)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// Start functions

// startCommon prepares the container for starting without actually starting
// it: loads the go-lxc struct, loads kernel modules, rotates logs, mounts the
// root volume, remaps the filesystem if the idmap changed, generates the
// seccomp profile, starts all devices and writes out the final lxc.conf.
// It returns the generated config file path and the post-start hooks the
// devices registered, to be run by the caller once the container is up.
func (d *lxc) startCommon() (string, []func() error, error) {
	revert := revert.New()
	defer revert.Fail()

	// Load the go-lxc struct
	err := d.initLXC(true)
	if err != nil {
		return "", nil, errors.Wrap(err, "Load go-lxc struct")
	}

	// Check that we're not already running
	if d.IsRunning() {
		return "", nil, fmt.Errorf("The container is already running")
	}

	// Load any required kernel modules (comma-separated list).
	kernelModules := d.expandedConfig["linux.kernel_modules"]
	if kernelModules != "" {
		for _, module := range strings.Split(kernelModules, ",") {
			module = strings.TrimPrefix(module, " ")
			err := util.LoadModule(module)
			if err != nil {
				return "", nil, fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
			}
		}
	}

	// Rotate the log file.
	logfile := d.LogFilePath()
	if shared.PathExists(logfile) {
		// Best-effort removal of the previous rotation; error deliberately ignored.
		os.Remove(logfile + ".old")
		err := os.Rename(logfile, logfile+".old")
		if err != nil {
			return "", nil, err
		}
	}

	// Mount instance root volume.
	_, err = d.mount()
	if err != nil {
		return "", nil, err
	}
	revert.Add(func() { d.unmount() })

	/* Deal with idmap changes */
	nextIdmap, err := d.NextIdmap()
	if err != nil {
		return "", nil, errors.Wrap(err, "Set ID map")
	}

	diskIdmap, err := d.DiskIdmap()
	if err != nil {
		return "", nil, errors.Wrap(err, "Set last ID map")
	}

	// Once mounted, check if filesystem needs shifting.
	// Shifting is skipped entirely when there is no on-disk idmap and shiftfs
	// is available (shiftfs does the remapping at mount time instead).
	if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && d.state.OS.Shiftfs) {
		if shared.IsTrue(d.expandedConfig["security.protection.shift"]) {
			return "", nil, fmt.Errorf("Container is protected against filesystem shifting")
		}

		d.logger.Debug("Container idmap changed, remapping")
		d.updateProgress("Remapping container filesystem")

		storageType, err := d.getStorageType()
		if err != nil {
			return "", nil, errors.Wrap(err, "Storage type")
		}

		// First undo the previous on-disk idmap (if any)...
		if diskIdmap != nil {
			if storageType == "zfs" {
				err = diskIdmap.UnshiftRootfs(d.RootfsPath(), storageDrivers.ShiftZFSSkipper)
			} else if storageType == "btrfs" {
				err = storageDrivers.UnshiftBtrfsRootfs(d.RootfsPath(), diskIdmap)
			} else {
				err = diskIdmap.UnshiftRootfs(d.RootfsPath(), nil)
			}
			if err != nil {
				return "", nil, err
			}
		}

		// ...then apply the new idmap (unless shiftfs will handle it).
		if nextIdmap != nil && !d.state.OS.Shiftfs {
			if storageType == "zfs" {
				err = nextIdmap.ShiftRootfs(d.RootfsPath(), storageDrivers.ShiftZFSSkipper)
			} else if storageType == "btrfs" {
				err = storageDrivers.ShiftBtrfsRootfs(d.RootfsPath(), nextIdmap)
			} else {
				err = nextIdmap.ShiftRootfs(d.RootfsPath(), nil)
			}
			if err != nil {
				return "", nil, err
			}
		}

		// Record what is now actually on disk ("[]" when shiftfs is in use).
		jsonDiskIdmap := "[]"
		if nextIdmap != nil && !d.state.OS.Shiftfs {
			idmapBytes, err := json.Marshal(nextIdmap.Idmap)
			if err != nil {
				return "", nil, err
			}
			jsonDiskIdmap = string(idmapBytes)
		}

		err = d.VolatileSet(map[string]string{"volatile.last_state.idmap": jsonDiskIdmap})
		if err != nil {
			return "", nil, errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", d.name, d.id)
		}

		d.updateProgress("")
	}

	// Persist the currently-active idmap if it changed.
	var idmapBytes []byte
	if nextIdmap == nil {
		idmapBytes = []byte("[]")
	} else {
		idmapBytes, err = json.Marshal(nextIdmap.Idmap)
		if err != nil {
			return "", nil, err
		}
	}

	if d.localConfig["volatile.idmap.current"] != string(idmapBytes) {
		err = d.VolatileSet(map[string]string{"volatile.idmap.current": string(idmapBytes)})
		if err != nil {
			return "", nil, errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", d.name, d.id)
		}
	}

	// Generate the Seccomp profile
	if err := seccomp.CreateProfile(d.state, d); err != nil {
		return "", nil, err
	}

	// Cleanup any existing leftover devices (best-effort; errors ignored).
	d.removeUnixDevices()
	d.removeDiskDevices()

	// Create any missing directories.
	err = os.MkdirAll(d.LogPath(), 0700)
	if err != nil {
		return "", nil, err
	}

	err = os.MkdirAll(d.DevicesPath(), 0711)
	if err != nil {
		return "", nil, err
	}

	err = os.MkdirAll(d.ShmountsPath(), 0711)
	if err != nil {
		return "", nil, err
	}

	// Generate UUID if not present.
	instUUID := d.localConfig["volatile.uuid"]
	if instUUID == "" {
		instUUID = uuid.New()
		d.VolatileSet(map[string]string{"volatile.uuid": instUUID})
	}

	// Create the devices
	postStartHooks := []func() error{}
	nicID := -1

	// Setup devices in sorted order, this ensures that device mounts are added in path order.
	for _, entry := range d.expandedDevices.Sorted() {
		dev := entry // Ensure device variable has local scope for revert.

		// Start the device.
		runConf, err := d.deviceStart(dev.Name, dev.Config, false)
		if err != nil {
			return "", nil, errors.Wrapf(err, "Failed to start device %q", dev.Name)
		}

		// Stop device on failure to setup container.
		revert.Add(func() {
			err := d.deviceStop(dev.Name, dev.Config, false, "")
			if err != nil {
				d.logger.Error("Failed to cleanup device", log.Ctx{"devName": dev.Name, "err": err})
			}
		})

		if runConf == nil {
			continue
		}

		// Process rootfs setup.
		if runConf.RootFS.Path != "" {
			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
				// Set the rootfs backend type if supported (must happen before any other lxc.rootfs)
				err := lxcSetConfigItem(d.c, "lxc.rootfs.backend", "dir")
				if err == nil {
					value := d.c.ConfigItem("lxc.rootfs.backend")
					if len(value) == 0 || value[0] != "dir" {
						lxcSetConfigItem(d.c, "lxc.rootfs.backend", "")
					}
				}
			}

			// liblxc >= 2.1 uses lxc.rootfs.path with a "dir:" prefix.
			if util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
				rootfsPath := fmt.Sprintf("dir:%s", runConf.RootFS.Path)
				err = lxcSetConfigItem(d.c, "lxc.rootfs.path", rootfsPath)
			} else {
				err = lxcSetConfigItem(d.c, "lxc.rootfs", runConf.RootFS.Path)
			}
			if err != nil {
				return "", nil, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
			}

			if len(runConf.RootFS.Opts) > 0 {
				err = lxcSetConfigItem(d.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ","))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
				}
			}

			// Use shiftfs hooks to remap ownership at mount time when there is
			// no on-disk idmap for an unprivileged container.
			if d.state.OS.Shiftfs && !d.IsPrivileged() && diskIdmap == nil {
				// Host side mark mount.
				err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath())))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
				}

				// Container side shift mount.
				err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath())))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
				}

				// Host side umount of mark mount.
				err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(d.RootfsPath())))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
				}
			}
		}

		// Pass any cgroups rules into LXC.
		if len(runConf.CGroups) > 0 {
			for _, rule := range runConf.CGroups {
				if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified {
					err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup2.%s", rule.Key), rule.Value)
				} else {
					err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value)
				}
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device cgroup '%s'", dev.Name)
				}
			}
		}

		// Pass any mounts into LXC.
		if len(runConf.Mounts) > 0 {
			for _, mount := range runConf.Mounts {
				if shared.StringInSlice("propagation", mount.Opts) && !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
					return "", nil, errors.Wrapf(fmt.Errorf("liblxc 3.0 is required for mount propagation configuration"), "Failed to setup device mount '%s'", dev.Name)
				}

				// Dynamic ownership shifting on unprivileged containers requires shiftfs hooks.
				if mount.OwnerShift == deviceConfig.MountOwnerShiftDynamic && !d.IsPrivileged() {
					if !d.state.OS.Shiftfs {
						return "", nil, errors.Wrapf(fmt.Errorf("shiftfs is required but isn't supported on system"), "Failed to setup device mount '%s'", dev.Name)
					}

					err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
					if err != nil {
						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
					}

					err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
					if err != nil {
						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
					}

					err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(mount.DevPath)))
					if err != nil {
						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
					}
				}

				// fstab-style entry: dev target fstype opts freq passno.
				mntVal := fmt.Sprintf("%s %s %s %s %d %d", shared.EscapePathFstab(mount.DevPath), shared.EscapePathFstab(mount.TargetPath), mount.FSType, strings.Join(mount.Opts, ","), mount.Freq, mount.PassNo)
				err = lxcSetConfigItem(d.c, "lxc.mount.entry", mntVal)
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount '%s'", dev.Name)
				}
			}
		}

		// Pass any network setup config into LXC.
		if len(runConf.NetworkInterface) > 0 {
			// Increment nicID so that LXC network index is unique per device.
			nicID++

			networkKeyPrefix := "lxc.net"
			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
				networkKeyPrefix = "lxc.network"
			}

			for _, nicItem := range runConf.NetworkInterface {
				err = lxcSetConfigItem(d.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value)
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device network interface '%s'", dev.Name)
				}
			}
		}

		// Add any post start hooks.
		if len(runConf.PostHooks) > 0 {
			postStartHooks = append(postStartHooks, runConf.PostHooks...)
		}
	}

	// Generate the LXC config
	configPath := filepath.Join(d.LogPath(), "lxc.conf")
	err = d.c.SaveConfigFile(configPath)
	if err != nil {
		os.Remove(configPath)
		return "", nil, err
	}

	// Set ownership to match container root
	currentIdmapset, err := d.CurrentIdmap()
	if err != nil {
		return "", nil, err
	}

	uid := int64(0)
	if currentIdmapset != nil {
		uid, _ = currentIdmapset.ShiftFromNs(0, 0)
	}

	err = os.Chown(d.Path(), int(uid), 0)
	if err != nil {
		return "", nil, err
	}

	// We only need traversal by root in the container
	err = os.Chmod(d.Path(), 0100)
	if err != nil {
		return "", nil, err
	}

	// Update the backup.yaml file
	err = d.UpdateBackupFile()
	if err != nil {
		return "", nil, err
	}

	// If starting stateless, wipe state
	if !d.IsStateful() && shared.PathExists(d.StatePath()) {
		os.RemoveAll(d.StatePath())
	}

	// Unmount any previously mounted shiftfs
	unix.Unmount(d.RootfsPath(), unix.MNT_DETACH)

	revert.Success()
	return configPath, postStartHooks, nil
}
// detachInterfaceRename enters the container's network namespace and moves the named interface
// in ifName back to the network namespace of the running process as the name specified in hostName.
func (d *lxc) detachInterfaceRename(netns string, ifName string, hostName string) error {
	// Ask the forknet helper to enter netns, detach ifName and hand it back
	// to this (LXD's) process namespace under hostName.
	_, err := shared.RunCommand(
		d.state.OS.ExecPath,
		"forknet",
		"detach",
		"--",
		netns,
		fmt.Sprintf("%d", os.Getpid()),
		ifName,
		hostName,
	)

	return err
}
// Start starts the instance.
//
// When stateful is true the container's saved CRIU state is restored as part
// of the start; otherwise any leftover state directory is wiped first. The
// whole operation is serialised through an operation lock so concurrent
// start/restart/restore requests coalesce.
func (d *lxc) Start(stateful bool) error {
	var ctxMap log.Ctx

	// Setup a new operation
	exists, op, err := operationlock.CreateWaitGet(d.id, "start", []string{"restart", "restore"}, false, false)
	if err != nil {
		return errors.Wrap(err, "Create container start operation")
	}
	if exists {
		// An existing matching operation has now succeeded, return.
		return nil
	}
	defer op.Done(nil)

	if !daemon.SharedMountsSetup {
		err = fmt.Errorf("Daemon failed to setup shared mounts base. Does security.nesting need to be turned on?")
		op.Done(err)
		return err
	}

	// Run the shared start code
	configPath, postStartHooks, err := d.startCommon()
	if err != nil {
		op.Done(err)
		return errors.Wrap(err, "Failed preparing container for start")
	}

	ctxMap = log.Ctx{
		"action": op.Action(),
		"created": d.creationDate,
		"ephemeral": d.ephemeral,
		"used": d.lastUsedDate,
		"stateful": stateful}

	if op.Action() == "start" {
		d.logger.Info("Starting container", ctxMap)
	}

	// If stateful, restore now
	if stateful {
		if !d.stateful {
			err = fmt.Errorf("Container has no existing state to restore")
			op.Done(err)
			return err
		}

		criuMigrationArgs := instance.CriuMigrationArgs{
			Cmd: liblxc.MIGRATE_RESTORE,
			StateDir: d.StatePath(),
			Function: "snapshot",
			Stop: false,
			ActionScript: false,
			DumpDir: "",
			PreDumpDir: "",
		}

		// A restore error is only fatal if the container did not come up.
		err := d.Migrate(&criuMigrationArgs)
		if err != nil && !d.IsRunning() {
			op.Done(err)
			return errors.Wrap(err, "Migrate")
		}

		// State has been consumed; drop it and clear the stateful flag.
		os.RemoveAll(d.StatePath())
		d.stateful = false

		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, false)
		if err != nil {
			op.Done(err)
			return errors.Wrap(err, "Start container")
		}

		// Run any post start hooks.
		err = d.runHooks(postStartHooks)
		if err != nil {
			// Attempt to stop container.
			d.Stop(false)
			op.Done(err)
			return err
		}

		if op.Action() == "start" {
			d.logger.Info("Started container", ctxMap)
			d.lifecycle("started", nil)
		}
		return nil
	} else if d.stateful {
		/* stateless start required when we have state, let's delete it */
		err := os.RemoveAll(d.StatePath())
		if err != nil {
			op.Done(err)
			return err
		}

		d.stateful = false
		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, false)
		if err != nil {
			op.Done(err)
			return errors.Wrap(err, "Persist stateful flag")
		}
	}

	name := project.Instance(d.Project(), d.name)

	// Start the LXC container
	_, err = shared.RunCommand(
		d.state.OS.ExecPath,
		"forkstart",
		name,
		d.state.OS.LxcPath,
		configPath)
	if err != nil && !d.IsRunning() {
		// Attempt to extract the LXC errors
		lxcLog := ""
		logPath := filepath.Join(d.LogPath(), "lxc.log")
		if shared.PathExists(logPath) {
			logContent, err := ioutil.ReadFile(logPath)
			if err == nil {
				for _, line := range strings.Split(string(logContent), "\n") {
					fields := strings.Fields(line)
					if len(fields) < 4 {
						continue
					}

					// We only care about errors
					if fields[2] != "ERROR" {
						continue
					}

					// Prepend the line break
					if len(lxcLog) == 0 {
						lxcLog += "\n"
					}

					lxcLog += fmt.Sprintf(" %s\n", strings.Join(fields[0:], " "))
				}
			}
		}

		d.logger.Error("Failed starting container", ctxMap)

		// Return the actual error
		op.Done(err)
		return err
	}

	// Run any post start hooks.
	err = d.runHooks(postStartHooks)
	if err != nil {
		// Attempt to stop container.
		d.Stop(false)
		op.Done(err)
		return err
	}

	if op.Action() == "start" {
		d.logger.Info("Started container", ctxMap)
		d.lifecycle("started", nil)
	}

	return nil
}
// OnHook is the top-level hook handler.
// It dispatches the named LXC lifecycle hook to the matching handler,
// returning instance.ErrNotImplemented for unknown hook names.
func (d *lxc) OnHook(hookName string, args map[string]string) error {
	handlers := map[string]func(map[string]string) error{
		instance.HookStart:  d.onStart,
		instance.HookStopNS: d.onStopNS,
		instance.HookStop:   d.onStop,
	}

	handler, ok := handlers[hookName]
	if !ok {
		return instance.ErrNotImplemented
	}

	return handler(args)
}
// onStart implements the start hook.
// It runs inside the daemon when LXC reports the container has started:
// loads the AppArmor profile, applies pending templates, triggers a
// scheduler rebalance, optionally applies network priority, and records
// the RUNNING state plus last-used timestamp in the database.
func (d *lxc) onStart(_ map[string]string) error {
	// Make sure we can't call go-lxc functions by mistake
	d.fromHook = true

	// Load the container AppArmor profile
	if err := apparmor.InstanceLoad(d.state, d); err != nil {
		return err
	}

	// Template anything that needs templating
	templateKey := "volatile.apply_template"
	if trigger := d.localConfig[templateKey]; trigger != "" {
		// Run any template that needs running
		if err := d.templateApplyNow(instance.TemplateTrigger(trigger)); err != nil {
			apparmor.InstanceUnload(d.state, d)
			return err
		}

		// Remove the volatile key from the DB
		if err := d.state.Cluster.DeleteInstanceConfigKey(d.id, templateKey); err != nil {
			apparmor.InstanceUnload(d.state, d)
			return err
		}
	}

	if err := d.templateApplyNow("start"); err != nil {
		apparmor.InstanceUnload(d.state, d)
		return err
	}

	// Trigger a rebalance
	cgroup.TaskSchedulerTrigger("container", d.name, "started")

	// Apply network priority
	if d.expandedConfig["limits.network.priority"] != "" {
		go func(d *lxc) {
			d.fromHook = false
			if err := d.setNetworkPriority(); err != nil {
				d.logger.Error("Failed to apply network priority", log.Ctx{"err": err})
			}
		}(d)
	}

	// Database updates
	return d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		// Record current state
		if err := tx.UpdateInstancePowerState(d.id, "RUNNING"); err != nil {
			return errors.Wrap(err, "Error updating container state")
		}

		// Update time container last started time
		if err := tx.UpdateInstanceLastUsedDate(d.id, time.Now().UTC()); err != nil {
			return errors.Wrap(err, "Error updating last used")
		}

		return nil
	})
}
// Stop functions

// Stop forcibly stops the container, optionally dumping its runtime state
// first (stateful stop via CRIU).
//
// Flow:
//  1. Take (or join) a "stop" operation lock.
//  2. Stateful: dump state with CRIU and persist the stateful flag.
//  3. Stateless: discard stale saved state, mitigate fork bombs (pids limit
//     or freezer), then ask go-lxc to stop and wait for the lock to clear.
func (d *lxc) Stop(stateful bool) error {
	var ctxMap log.Ctx

	// Setup a new operation. If a stop/restart/restore operation is already
	// in progress this joins it instead of starting a second one.
	exists, op, err := operationlock.CreateWaitGet(d.id, "stop", []string{"restart", "restore"}, false, true)
	if err != nil {
		return err
	}
	if exists {
		// An existing matching operation has now succeeded, return.
		return nil
	}

	// Check that we're not already stopped
	if !d.IsRunning() {
		err = fmt.Errorf("The container is already stopped")
		op.Done(err)
		return err
	}

	ctxMap = log.Ctx{
		"action":    op.Action(),
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"stateful":  stateful}

	if op.Action() == "stop" {
		d.logger.Info("Stopping container", ctxMap)
	}

	// Handle stateful stop
	if stateful {
		// Cleanup any existing state
		stateDir := d.StatePath()
		os.RemoveAll(stateDir)

		err := os.MkdirAll(stateDir, 0700)
		if err != nil {
			op.Done(err)
			return err
		}

		criuMigrationArgs := instance.CriuMigrationArgs{
			Cmd:          liblxc.MIGRATE_DUMP,
			StateDir:     stateDir,
			Function:     "snapshot",
			Stop:         true, // CRIU stops the container once the dump completes.
			ActionScript: false,
			DumpDir:      "",
			PreDumpDir:   "",
		}

		// Checkpoint
		err = d.Migrate(&criuMigrationArgs)
		if err != nil {
			op.Done(err)
			return err
		}

		// Wait for the stop operation (completed by the onStop hook) to finish.
		err = op.Wait()
		if err != nil && d.IsRunning() {
			return err
		}

		// Persist the stateful flag so a later Start() knows to restore state.
		d.stateful = true
		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, true)
		if err != nil {
			d.logger.Error("Failed stopping container", ctxMap)
			return err
		}

		d.logger.Info("Stopped container", ctxMap)
		d.lifecycle("stopped", nil)
		return nil
	} else if shared.PathExists(d.StatePath()) {
		// Stateless stop: discard any stale saved state from a previous run.
		os.RemoveAll(d.StatePath())
	}

	// Load the go-lxc struct; force a config reload when raw.lxc overrides are set.
	if d.expandedConfig["raw.lxc"] != "" {
		err = d.initLXC(true)
		if err != nil {
			op.Done(err)
			return err
		}
	} else {
		err = d.initLXC(false)
		if err != nil {
			op.Done(err)
			return err
		}
	}

	// Load cgroup abstraction
	cg, err := d.cgroup(nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Fork-bomb mitigation, prevent forking from this point on
	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
		// Attempt to disable forking new processes
		cg.SetMaxProcesses(0)
	} else if d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
		// Attempt to freeze the container
		freezer := make(chan bool, 1)
		go func() {
			d.Freeze()
			freezer <- true
		}()

		// Give the freeze up to 5s; unfreeze again on timeout so the stop
		// below is not blocked by a half-frozen container.
		select {
		case <-freezer:
		case <-time.After(time.Second * 5):
			d.Unfreeze()
		}
	}

	err = d.c.Stop()
	if err != nil {
		op.Done(err)
		return err
	}

	// Wait for the onStop hook cleanup to complete and release the lock.
	err = op.Wait()
	if err != nil && d.IsRunning() {
		return err
	}

	if op.Action() == "stop" {
		d.logger.Info("Stopped container", ctxMap)
		d.lifecycle("stopped", nil)
	}

	return nil
}
// Shutdown stops the instance.
// It requests a clean shutdown from the container's init process and waits
// up to the supplied timeout for the container to stop.
func (d *lxc) Shutdown(timeout time.Duration) error {
	// Setup a new operation; join an in-flight stop/restart if one exists.
	exists, op, err := operationlock.CreateWaitGet(d.id, "stop", []string{"restart"}, true, false)
	if err != nil {
		return err
	}
	if exists {
		// An existing matching operation has now succeeded, return.
		return nil
	}

	// If frozen, resume so the signal can be handled.
	if d.IsFrozen() {
		if err := d.Unfreeze(); err != nil {
			return err
		}
	}

	// Check that we're not already stopped
	if !d.IsRunning() {
		err = fmt.Errorf("The container is already stopped")
		op.Done(err)
		return err
	}

	ctxMap := log.Ctx{
		"action":    "shutdown",
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"timeout":   timeout}

	if op.Action() == "stop" {
		d.logger.Info("Shutting down container", ctxMap)
	}

	// Load the go-lxc struct; force a config reload when raw.lxc overrides are set.
	err = d.initLXC(d.expandedConfig["raw.lxc"] != "")
	if err != nil {
		op.Done(err)
		return err
	}

	err = d.c.Shutdown(timeout)
	if err != nil {
		op.Done(err)
		return err
	}

	// Wait for the stop hook to finish and release the operation lock.
	err = op.Wait()
	if err != nil && d.IsRunning() {
		return err
	}

	if op.Action() == "stop" {
		d.logger.Info("Shut down container", ctxMap)
		d.lifecycle("shutdown", nil)
	}

	return nil
}
// Restart restarts the instance by delegating to the shared restart helper.
func (d *lxc) Restart(timeout time.Duration) error {
	ctxMap := log.Ctx{
		// Fix: the action was previously logged as "shutdown", a copy/paste
		// from Shutdown(); this operation is a restart.
		"action":    "restart",
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"timeout":   timeout}

	d.logger.Info("Restarting container", ctxMap)

	err := d.restart(d, timeout)
	if err != nil {
		return err
	}

	d.logger.Info("Restarted container", ctxMap)
	d.lifecycle("restarted", nil)

	return nil
}
// onStopNS is triggered by LXC's stop hook once a container is shutdown but before the container's
// namespaces have been closed. The netns path of the stopped container is provided.
func (d *lxc) onStopNS(args map[string]string) error {
	target, netns := args["target"], args["netns"]

	// Validate target.
	switch target {
	case "stop", "reboot":
		// Valid targets, carry on.
	default:
		d.logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"target": target})
		return fmt.Errorf("Invalid stop target %q", target)
	}

	// Clean up devices (NICs only at this stage, while the netns still exists).
	d.cleanupDevices(false, netns)

	return nil
}
// onStop is triggered by LXC's post-stop hook once a container is shutdown and after the
// container's namespaces have been closed.
//
// It validates the hook target, picks up (or, for container-initiated
// reboots, creates) the operation lock, records the STOPPED power state,
// then performs the remaining cleanup asynchronously: device stop,
// ownership/permission reset, storage unmount, AppArmor unload, and finally
// either restarting the container (reboot) or deleting it (ephemeral).
func (d *lxc) onStop(args map[string]string) error {
	var err error

	target := args["target"]

	// Validate target
	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
		d.logger.Error("Container sent invalid target to OnStop", log.Ctx{"target": target})
		return fmt.Errorf("Invalid stop target: %s", target)
	}

	// Pick up the existing stop operation lock created in Stop() function.
	op := operationlock.Get(d.id)

	// Refuse if some other (non stop/restart/restore) operation holds the lock.
	if op != nil && !shared.StringInSlice(op.Action(), []string{"stop", "restart", "restore"}) {
		return fmt.Errorf("Container is already running a %s operation", op.Action())
	}

	// Container-initiated reboots arrive with no lock held; create a restart
	// operation so the Start() below is tracked.
	if op == nil && target == "reboot" {
		op, err = operationlock.Create(d.id, "restart", false, false)
		if err != nil {
			return errors.Wrap(err, "Create restart operation")
		}
	}

	// NOTE(review): op can still be nil here (container-initiated plain stop);
	// the op.Done() calls below presumably tolerate a nil receiver — confirm
	// against the operationlock package.

	// Make sure we can't call go-lxc functions by mistake
	d.fromHook = true

	// Log user actions
	ctxMap := log.Ctx{
		"action":    target,
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"stateful":  false}

	if op == nil {
		d.logger.Debug("Container initiated", ctxMap)
	}

	// Record power state
	err = d.state.Cluster.UpdateInstancePowerState(d.id, "STOPPED")
	if err != nil {
		err = errors.Wrap(err, "Failed to set container state")
		op.Done(err)
		return err
	}

	// Remaining cleanup runs in the background so the hook returns quickly.
	go func(d *lxc, target string, op *operationlock.InstanceOperation) {
		d.fromHook = false
		err = nil // NOTE(review): writes the captured outer err; shadowed by := below.

		// Unlock on return
		defer op.Done(nil)

		// Wait for other post-stop actions to be done and the container actually stopping.
		d.IsRunning()
		d.logger.Debug("Container stopped, cleaning up")

		// Clean up devices.
		d.cleanupDevices(false, "")

		// Remove directory ownership (to avoid issue if uidmap is re-used)
		err := os.Chown(d.Path(), 0, 0)
		if err != nil {
			op.Done(errors.Wrap(err, "Failed clearing ownership"))
			return
		}

		err = os.Chmod(d.Path(), 0100)
		if err != nil {
			op.Done(errors.Wrap(err, "Failed clearing permissions"))
			return
		}

		// Stop the storage for this container
		_, err = d.unmount()
		if err != nil {
			op.Done(errors.Wrap(err, "Failed unmounting container"))
			return
		}

		// Unload the apparmor profile
		err = apparmor.InstanceUnload(d.state, d)
		if err != nil {
			op.Done(errors.Wrap(err, "Failed to destroy apparmor namespace"))
			return
		}

		// Clean all the unix devices
		err = d.removeUnixDevices()
		if err != nil {
			op.Done(errors.Wrap(err, "Failed to remove unix devices"))
			return
		}

		// Clean all the disk devices
		err = d.removeDiskDevices()
		if err != nil {
			op.Done(errors.Wrap(err, "Failed to remove disk devices"))
			return
		}

		// Log and emit lifecycle if not user triggered
		if op == nil {
			d.logger.Info("Shut down container", ctxMap)
			d.lifecycle("shutdown", nil)
		}

		// Reboot the container
		if target == "reboot" {
			// Start the container again
			err = d.Start(false)
			if err != nil {
				op.Done(errors.Wrap(err, "Failed restarting container"))
				return
			}

			d.lifecycle("restarted", nil)
			return
		}

		// Trigger a rebalance
		cgroup.TaskSchedulerTrigger("container", d.name, "stopped")

		// Destroy ephemeral containers
		if d.ephemeral {
			err = d.Delete(true)
			if err != nil {
				op.Done(errors.Wrap(err, "Failed deleting ephemeral container"))
				return
			}
		}
	}(d, target, op)

	return nil
}
// cleanupDevices performs any needed device cleanup steps when container is stopped.
// Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
// container's network namespace is unmounted (which is required for NIC device cleanup).
func (d *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
	fromStopNSHook := stopHookNetnsPath != ""

	for _, entry := range d.expandedDevices.Reversed() {
		// NIC devices are stopped from the onStopNS hook (while the netns is
		// still mounted); every other device type is stopped from the onStop
		// hook, so disks are stopped only after the instance has fully stopped.
		isNIC := entry.Config["type"] == "nic"
		if fromStopNSHook != isNIC {
			continue
		}

		// Use the device interface if device supports it.
		err := d.deviceStop(entry.Name, entry.Config, instanceRunning, stopHookNetnsPath)
		if err == device.ErrUnsupportedDevType {
			continue
		} else if err != nil {
			d.logger.Error("Failed to stop device", log.Ctx{"devName": entry.Name, "err": err})
		}
	}
}
// Freeze functions.

// Freeze suspends the container's processes via the cgroup freezer.
// It is a no-op (with an informational log) when the kernel lacks freezer
// support, and errors when the container is stopped or already frozen.
func (d *lxc) Freeze() error {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	// Check that we're running
	if !d.IsRunning() {
		return fmt.Errorf("The container isn't running")
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// Check if the CGroup is available
	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
		d.logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
		return nil
	}

	// Check that we're not already frozen
	if d.IsFrozen() {
		return fmt.Errorf("The container is already frozen")
	}

	d.logger.Info("Freezing container", ctxMap)

	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		ctxMap["err"] = err
		d.logger.Error("Failed freezing container", ctxMap)
		return err
	}

	if err := d.c.Freeze(); err != nil {
		ctxMap["err"] = err
		d.logger.Error("Failed freezing container", ctxMap)
		return err
	}

	d.logger.Info("Froze container", ctxMap)
	d.lifecycle("paused", nil)

	return nil
}
// Unfreeze unfreezes the instance.
// It is a no-op (with an informational log) when the kernel lacks freezer
// support, and errors when the container is stopped or not frozen.
func (d *lxc) Unfreeze() error {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	// Check that we're running
	if !d.IsRunning() {
		return fmt.Errorf("The container isn't running")
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// Check if the CGroup is available
	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
		d.logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
		return nil
	}

	// Check that we're frozen
	if !d.IsFrozen() {
		return fmt.Errorf("The container is already running")
	}

	d.logger.Info("Unfreezing container", ctxMap)

	// Load the go-lxc struct
	err = d.initLXC(false)
	if err != nil {
		d.logger.Error("Failed unfreezing container", ctxMap)
		return err
	}

	err = d.c.Unfreeze()
	if err != nil {
		// Fix: previously a failed unfreeze fell through, still logging
		// "Unfroze container" and emitting a "resumed" lifecycle event.
		// Return early instead, mirroring Freeze().
		d.logger.Error("Failed unfreezing container", ctxMap)
		return err
	}

	d.logger.Info("Unfroze container", ctxMap)
	d.lifecycle("resumed", nil)

	return nil
}
// Get lxc container state, with a 5 second timeout (the previous comment
// said 1 second, but the select below waits 5 seconds).
// If we don't get a reply, assume the lxc monitor is hung.
func (d *lxc) getLxcState() (liblxc.State, error) {
	// Snapshots are never running.
	if d.IsSnapshot() {
		return liblxc.StateMap["STOPPED"], nil
	}

	// Load the go-lxc struct
	err := d.initLXC(false)
	if err != nil {
		return liblxc.StateMap["STOPPED"], err
	}

	if d.c == nil {
		return liblxc.StateMap["STOPPED"], nil
	}

	// Query the state from a goroutine so a hung liblxc monitor can't block us.
	monitor := make(chan liblxc.State, 1)

	go func(c *liblxc.Container) {
		monitor <- c.State()
	}(d.c)

	select {
	case state := <-monitor:
		return state, nil
	case <-time.After(5 * time.Second):
		// NOTE(review): FROZEN is returned on timeout, presumably so callers
		// don't treat a hung monitor as a cleanly stopped container — confirm.
		return liblxc.StateMap["FROZEN"], fmt.Errorf("Monitor is hung")
	}
}
// Render renders the state of the instance.
//
// For snapshots it returns an *api.InstanceSnapshot, otherwise an
// *api.Instance. The second return value is the ETag data used for
// concurrency control. Any supplied options are applied to the response
// struct before it is returned.
func (d *lxc) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
	// Ignore err as the arch string on error is correct (unknown)
	architectureName, _ := osarch.ArchitectureName(d.architecture)

	if d.IsSnapshot() {
		// Prepare the ETag
		etag := []interface{}{d.expiryDate}

		snapState := api.InstanceSnapshot{
			CreatedAt:       d.creationDate,
			ExpandedConfig:  d.expandedConfig,
			ExpandedDevices: d.expandedDevices.CloneNative(),
			LastUsedAt:      d.lastUsedDate,
			Name:            strings.SplitN(d.name, "/", 2)[1],
			Stateful:        d.stateful,
			Size:            -1, // Default to uninitialised/error state (0 means no CoW usage).
		}
		snapState.Architecture = architectureName
		snapState.Config = d.localConfig
		snapState.Devices = d.localDevices.CloneNative()
		snapState.Ephemeral = d.ephemeral
		snapState.Profiles = d.profiles
		snapState.ExpiresAt = d.expiryDate

		for _, option := range options {
			err := option(&snapState)
			if err != nil {
				return nil, nil, err
			}
		}

		return &snapState, etag, nil
	}

	// Prepare the ETag
	etag := []interface{}{d.architecture, d.localConfig, d.localDevices, d.ephemeral, d.profiles}

	// FIXME: Render shouldn't directly access the go-lxc struct
	cState, err := d.getLxcState()
	if err != nil {
		// Fix: message previously contained the typo "Get container stated".
		return nil, nil, errors.Wrap(err, "Get container state")
	}
	statusCode := lxcStatusCode(cState)

	instState := api.Instance{
		ExpandedConfig:  d.expandedConfig,
		ExpandedDevices: d.expandedDevices.CloneNative(),
		Name:            d.name,
		Status:          statusCode.String(),
		StatusCode:      statusCode,
		Location:        d.node,
		Type:            d.Type().String(),
	}

	instState.Description = d.description
	instState.Architecture = architectureName
	instState.Config = d.localConfig
	instState.CreatedAt = d.creationDate
	instState.Devices = d.localDevices.CloneNative()
	instState.Ephemeral = d.ephemeral
	instState.LastUsedAt = d.lastUsedDate
	instState.Profiles = d.profiles
	instState.Stateful = d.stateful

	for _, option := range options {
		err := option(&instState)
		if err != nil {
			return nil, nil, err
		}
	}

	return &instState, etag, nil
}
// RenderFull renders the full state of the instance.
// It combines the Render() output with live state, the snapshot list and
// the backup list. Only valid for containers, not snapshots.
func (d *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
	if d.IsSnapshot() {
		return nil, nil, fmt.Errorf("RenderFull only works with containers")
	}

	// Get the Container struct
	base, etag, err := d.Render()
	if err != nil {
		return nil, nil, err
	}

	// Convert to ContainerFull
	full := api.InstanceFull{Instance: *base.(*api.Instance)}

	// Add the ContainerState
	full.State, err = d.RenderState()
	if err != nil {
		return nil, nil, err
	}

	// Add the ContainerSnapshots
	snapshots, err := d.Snapshots()
	if err != nil {
		return nil, nil, err
	}

	for _, snapshot := range snapshots {
		rendered, _, err := snapshot.Render()
		if err != nil {
			return nil, nil, err
		}

		// Initialise lazily so an instance without snapshots keeps a nil slice.
		if full.Snapshots == nil {
			full.Snapshots = []api.InstanceSnapshot{}
		}

		full.Snapshots = append(full.Snapshots, *rendered.(*api.InstanceSnapshot))
	}

	// Add the ContainerBackups
	backups, err := d.Backups()
	if err != nil {
		return nil, nil, err
	}

	for _, entry := range backups {
		rendered := entry.Render()

		// Initialise lazily so an instance without backups keeps a nil slice.
		if full.Backups == nil {
			full.Backups = []api.InstanceBackup{}
		}

		full.Backups = append(full.Backups, *rendered)
	}

	return &full, etag, nil
}
// RenderState renders just the running state of the instance.
// Runtime statistics (PID, CPU, memory, network, processes) are only
// populated while the container is running; disk usage is always included.
func (d *lxc) RenderState() (*api.InstanceState, error) {
	cState, err := d.getLxcState()
	if err != nil {
		return nil, err
	}

	statusCode := lxcStatusCode(cState)
	st := api.InstanceState{
		Status:     statusCode.String(),
		StatusCode: statusCode,
	}

	if d.IsRunning() {
		st.Pid = int64(d.InitPID())
		st.CPU = d.cpuState()
		st.Memory = d.memoryState()
		st.Network = d.networkState()
		st.Processes = d.processesState()
	}

	st.Disk = d.diskState()

	return &st, nil
}
// Restore restores a snapshot.
//
// sourceContainer is the snapshot instance to restore from. If stateful is
// true the saved CRIU state is restored as well, leaving the container
// running. A running container is stopped first and, for stateless
// restores, started again afterwards.
func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
	var ctxMap log.Ctx

	op, err := operationlock.Create(d.id, "restore", false, false)
	if err != nil {
		return errors.Wrap(err, "Create restore operation")
	}
	defer op.Done(nil)

	// Stop the container.
	wasRunning := false
	if d.IsRunning() {
		wasRunning = true

		ephemeral := d.IsEphemeral()
		if ephemeral {
			// Unset ephemeral flag so the Stop() below doesn't trigger deletion.
			args := db.InstanceArgs{
				Architecture: d.Architecture(),
				Config:       d.LocalConfig(),
				Description:  d.Description(),
				Devices:      d.LocalDevices(),
				Ephemeral:    false,
				Profiles:     d.Profiles(),
				Project:      d.Project(),
				Type:         d.Type(),
				Snapshot:     d.IsSnapshot(),
			}

			err := d.Update(args, false)
			if err != nil {
				op.Done(err)
				return err
			}

			// On function return, set the flag back on.
			defer func() {
				args.Ephemeral = ephemeral
				d.Update(args, false)
			}()
		}

		// This will unmount the container storage.
		err := d.Stop(false)
		if err != nil {
			op.Done(err)
			return err
		}

		// Refresh the operation as that one is now complete.
		op, err = operationlock.Create(d.id, "restore", false, false)
		if err != nil {
			return errors.Wrap(err, "Create restore operation")
		}
		defer op.Done(nil)
	}

	ctxMap = log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"source":    sourceContainer.Name()}

	d.logger.Info("Restoring container", ctxMap)

	// Initialize storage interface for the container and mount the rootfs for criu state check.
	pool, err := storagePools.GetPoolByInstance(d.state, d)
	if err != nil {
		op.Done(err)
		return err
	}

	d.logger.Debug("Mounting instance to check for CRIU state path existence")

	// Ensure that storage is mounted for state path checks and for backup.yaml updates.
	_, err = pool.MountInstance(d, nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Check for CRIU if necessary, before doing a bunch of filesystem manipulations.
	// Requires container be mounted to check StatePath exists.
	if shared.PathExists(d.StatePath()) {
		_, err := exec.LookPath("criu")
		if err != nil {
			err = fmt.Errorf("Failed to restore container state. CRIU isn't installed")
			op.Done(err)
			return err
		}
	}

	_, err = pool.UnmountInstance(d, nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Restore the rootfs.
	err = pool.RestoreInstanceSnapshot(d, sourceContainer, nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Restore the configuration.
	args := db.InstanceArgs{
		Architecture: sourceContainer.Architecture(),
		Config:       sourceContainer.LocalConfig(),
		Description:  sourceContainer.Description(),
		Devices:      sourceContainer.LocalDevices(),
		Ephemeral:    sourceContainer.IsEphemeral(),
		Profiles:     sourceContainer.Profiles(),
		Project:      sourceContainer.Project(),
		Type:         sourceContainer.Type(),
		Snapshot:     sourceContainer.IsSnapshot(),
	}

	// Don't pass as user-requested as there's no way to fix a bad config.
	// This will call d.UpdateBackupFile() to ensure snapshot list is up to date.
	err = d.Update(args, false)
	if err != nil {
		op.Done(err)
		return err
	}

	// If the container wasn't running but was stateful, should we restore it as running?
	if stateful {
		if !shared.PathExists(d.StatePath()) {
			// Fix: message previously read "requested by snapshot is stateless".
			err = fmt.Errorf("Stateful snapshot restore requested but snapshot is stateless")
			op.Done(err)
			return err
		}

		d.logger.Debug("Performing stateful restore", ctxMap)
		d.stateful = true

		criuMigrationArgs := instance.CriuMigrationArgs{
			Cmd:          liblxc.MIGRATE_RESTORE,
			StateDir:     d.StatePath(),
			Function:     "snapshot",
			Stop:         false,
			ActionScript: false,
			DumpDir:      "",
			PreDumpDir:   "",
		}

		// Checkpoint.
		err := d.Migrate(&criuMigrationArgs)
		if err != nil {
			op.Done(err)
			return err
		}

		// Remove the state from the parent container; we only keep this in snapshots.
		// Fix: this previously stored the result in err2 but tested
		// os.IsNotExist() against the (nil) migration error and returned that
		// nil error, silently swallowing removal failures.
		err = os.RemoveAll(d.StatePath())
		if err != nil && !os.IsNotExist(err) {
			op.Done(err)
			return err
		}

		// NOTE(review): the stateless path below emits a "restored" lifecycle
		// event but this stateful path does not — confirm whether that is
		// intentional before adding one.
		d.logger.Debug("Performed stateful restore", ctxMap)
		d.logger.Info("Restored container", ctxMap)
		return nil
	}

	// Restart the container.
	if wasRunning {
		d.logger.Debug("Starting instance after snapshot restore")
		err = d.Start(false)
		if err != nil {
			op.Done(err)
			return err
		}
	}

	d.lifecycle("restored", map[string]interface{}{"snapshot": sourceContainer.Name()})
	d.logger.Info("Restored container", ctxMap)
	return nil
}
// cleanup removes the container's leftover runtime artefacts: device mounts,
// security profiles (AppArmor, seccomp) and the devices/shmounts paths.
// All steps are best-effort; errors are ignored.
func (d *lxc) cleanup() {
	// Unmount any leftovers
	d.removeUnixDevices()
	d.removeDiskDevices()

	// Remove the security profiles
	apparmor.InstanceDelete(d.state, d)
	seccomp.DeleteProfile(d)

	// Remove the devices path
	os.Remove(d.DevicesPath())

	// Remove the shmounts path
	os.RemoveAll(d.ShmountsPath())
}
// Delete deletes the instance.
//
// force bypasses the security.protection.delete check. Works on both
// containers and snapshots, removing storage volumes, child snapshots,
// backups, the MAAS record, devices and the database entry. During an
// "lxd import" disaster recovery the storage volumes are deliberately left
// untouched to avoid data loss.
func (d *lxc) Delete(force bool) error {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	d.logger.Info("Deleting container", ctxMap)

	// Deletion protection only applies to containers, never to snapshots.
	if !force && shared.IsTrue(d.expandedConfig["security.protection.delete"]) && !d.IsSnapshot() {
		err := fmt.Errorf("Container is protected")
		d.logger.Warn("Failed to delete container", log.Ctx{"err": err})
		return err
	}

	// A missing pool (ErrNoSuchObject) is tolerated so partially-created
	// instances can still be deleted.
	isImport := false
	pool, err := storagePools.GetPoolByInstance(d.state, d)
	if err != nil && err != db.ErrNoSuchObject {
		return err
	} else if pool != nil {
		// Check if we're dealing with "lxd import".
		// "lxd import" is used for disaster recovery, where you already have a container
		// and snapshots on disk but no DB entry. As such if something has gone wrong during
		// the creation of the instance and we are now being asked to delete the instance,
		// we should not remove the storage volumes themselves as this would cause data loss.
		cName, _, _ := shared.InstanceGetParentAndSnapshotName(d.Name())
		importingFilePath := storagePools.InstanceImportingFilePath(d.Type(), pool.Name(), d.Project(), cName)
		if shared.PathExists(importingFilePath) {
			isImport = true
		}

		if d.IsSnapshot() {
			if !isImport {
				// Remove snapshot volume and database record.
				err = pool.DeleteInstanceSnapshot(d, nil)
				if err != nil {
					return err
				}
			}
		} else {
			// Remove all snapshots by initialising each snapshot as an Instance and
			// calling its Delete function.
			err := instance.DeleteSnapshots(d.state, d.Project(), d.Name())
			if err != nil {
				d.logger.Error("Failed to delete instance snapshots", log.Ctx{"err": err})
				return err
			}

			if !isImport {
				// Remove the storage volume, snapshot volumes and database records.
				err = pool.DeleteInstance(d, nil)
				if err != nil {
					return err
				}
			}
		}
	}

	// Perform other cleanup steps if not snapshot.
	if !d.IsSnapshot() {
		// Remove all backups.
		backups, err := d.Backups()
		if err != nil {
			return err
		}

		for _, backup := range backups {
			err = backup.Delete()
			if err != nil {
				return err
			}
		}

		// Delete the MAAS entry.
		err = d.maasDelete()
		if err != nil {
			d.logger.Error("Failed deleting container MAAS record", log.Ctx{"err": err})
			return err
		}

		// Remove devices from container.
		for k, m := range d.expandedDevices {
			err = d.deviceRemove(k, m, false)
			if err != nil && err != device.ErrUnsupportedDevType {
				return errors.Wrapf(err, "Failed to remove device %q", k)
			}
		}

		// Clean things up.
		d.cleanup()
	}

	// Remove the database record of the instance or snapshot instance.
	if err := d.state.Cluster.DeleteInstance(d.project, d.Name()); err != nil {
		d.logger.Error("Failed deleting container entry", log.Ctx{"err": err})
		return err
	}

	// If dealing with a snapshot, refresh the backup file on the parent.
	if d.IsSnapshot() && !isImport {
		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)

		// Load the parent.
		parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
		if err != nil {
			return errors.Wrap(err, "Invalid parent")
		}

		// Update the backup file.
		err = parent.UpdateBackupFile()
		if err != nil {
			return err
		}
	}

	d.logger.Info("Deleted container", ctxMap)
	d.lifecycle("deleted", nil)

	return nil
}
// Rename renames the instance. Accepts an argument to enable applying deferred TemplateTriggerRename.
//
// The rename touches, in order: storage (instance or snapshot volume),
// snapshot database records, the instance database record, the log path,
// the MAAS record, backups, the cached go-lxc handle, DNS lease files and
// backup.yaml. A revert stack restores the in-memory name (and renamed
// backups) if a later step fails.
func (d *lxc) Rename(newName string, applyTemplateTrigger bool) error {
	oldName := d.Name()
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"newname":   newName}

	d.logger.Info("Renaming container", ctxMap)

	// Sanity checks.
	err := instance.ValidName(newName, d.IsSnapshot())
	if err != nil {
		return err
	}

	if d.IsRunning() {
		return fmt.Errorf("Renaming of running container not allowed")
	}

	// Clean things up.
	d.cleanup()

	pool, err := storagePools.GetPoolByInstance(d.state, d)
	if err != nil {
		return errors.Wrap(err, "Load instance storage pool")
	}

	if d.IsSnapshot() {
		_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
		err = pool.RenameInstanceSnapshot(d, newSnapName, nil)
		if err != nil {
			return errors.Wrap(err, "Rename instance snapshot")
		}
	} else {
		err = pool.RenameInstance(d, newName, nil)
		if err != nil {
			return errors.Wrap(err, "Rename instance")
		}

		if applyTemplateTrigger {
			err = d.DeferTemplateApply(instance.TemplateTriggerRename)
			if err != nil {
				return err
			}
		}
	}

	if !d.IsSnapshot() {
		// Rename all the instance snapshot database entries.
		results, err := d.state.Cluster.GetInstanceSnapshotsNames(d.project, oldName)
		if err != nil {
			d.logger.Error("Failed to get container snapshots", ctxMap)
			return errors.Wrapf(err, "Failed to get container snapshots")
		}

		for _, sname := range results {
			// Rename the snapshot.
			oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
			baseSnapName := filepath.Base(sname)
			err := d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
				return tx.RenameInstanceSnapshot(d.project, oldName, oldSnapName, baseSnapName)
			})
			if err != nil {
				d.logger.Error("Failed renaming snapshot", ctxMap)
				return errors.Wrapf(err, "Failed renaming snapshot")
			}
		}
	}

	// Rename the instance database entry.
	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		if d.IsSnapshot() {
			oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
			newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
			return tx.RenameInstanceSnapshot(d.project, oldParts[0], oldParts[1], newParts[1])
		}

		return tx.RenameInstance(d.project, oldName, newName)
	})
	if err != nil {
		d.logger.Error("Failed renaming container", ctxMap)
		return errors.Wrapf(err, "Failed renaming container")
	}

	// Rename the logging path.
	// NOTE(review): newFullName is computed from d.Name() while d.name still
	// holds the old name (it is only updated further below) — confirm this
	// resolves to the intended (new) log path.
	newFullName := project.Instance(d.Project(), d.Name())
	os.RemoveAll(shared.LogPath(newFullName))
	if shared.PathExists(d.LogPath()) {
		err := os.Rename(d.LogPath(), shared.LogPath(newFullName))
		if err != nil {
			d.logger.Error("Failed renaming container", ctxMap)
			return errors.Wrapf(err, "Failed renaming container")
		}
	}

	// Rename the MAAS entry.
	if !d.IsSnapshot() {
		err = d.maasRename(newName)
		if err != nil {
			return err
		}
	}

	revert := revert.New()
	defer revert.Fail()

	// Set the new name in the struct.
	d.name = newName
	revert.Add(func() { d.name = oldName })

	// Rename the backups.
	backups, err := d.Backups()
	if err != nil {
		return err
	}

	for _, backup := range backups {
		// Copy the loop variable so the revert closure captures this backup.
		b := backup
		oldName := b.Name()
		backupName := strings.Split(oldName, "/")[1]
		newName := fmt.Sprintf("%s/%s", newName, backupName)

		err = b.Rename(newName)
		if err != nil {
			return err
		}

		revert.Add(func() { b.Rename(oldName) })
	}

	// Invalidate the go-lxc cache.
	if d.c != nil {
		d.c.Release()
		d.c = nil
	}

	d.cConfig = false

	// Update lease files.
	network.UpdateDNSMasqStatic(d.state, "")

	err = d.UpdateBackupFile()
	if err != nil {
		return err
	}

	d.logger.Info("Renamed container", ctxMap)
	d.lifecycle("renamed", map[string]interface{}{"old_name": oldName})

	revert.Success()
	return nil
}
// CGroupSet sets a cgroup value for the instance.
// The container must be running for the value to be applied.
func (d *lxc) CGroupSet(key string, value string) error {
	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		return err
	}

	// Make sure the container is running
	if !d.IsRunning() {
		return fmt.Errorf("Can't set cgroups on a stopped container")
	}

	if err := d.c.SetCgroupItem(key, value); err != nil {
		return fmt.Errorf("Failed to set cgroup %s=\"%s\": %s", key, value, err)
	}

	return nil
}
// Update applies updated config.
//
// The supplied args replace the instance's description, architecture,
// ephemeral flag, local config, local devices, profiles and expiry date.
// Changes are validated, applied live to a running container where possible,
// persisted to the database and finally written to the backup file. On any
// failure all in-memory state is reverted by the deferred undo closure.
func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error {
	// Set sane defaults for unset keys
	if args.Project == "" {
		args.Project = project.Default
	}

	if args.Architecture == 0 {
		args.Architecture = d.architecture
	}

	if args.Config == nil {
		args.Config = map[string]string{}
	}

	if args.Devices == nil {
		args.Devices = deviceConfig.Devices{}
	}

	if args.Profiles == nil {
		args.Profiles = []string{}
	}

	if userRequested {
		// Validate the new config
		err := instance.ValidConfig(d.state.OS, args.Config, false, false)
		if err != nil {
			return errors.Wrap(err, "Invalid config")
		}

		// Validate the new devices without using expanded devices validation (expensive checks disabled).
		err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), args.Devices, false)
		if err != nil {
			return errors.Wrap(err, "Invalid devices")
		}
	}

	// Validate the new profiles
	profiles, err := d.state.Cluster.GetProfileNames(args.Project)
	if err != nil {
		return errors.Wrap(err, "Failed to get profiles")
	}

	checkedProfiles := []string{}
	for _, profile := range args.Profiles {
		if !shared.StringInSlice(profile, profiles) {
			return fmt.Errorf("Requested profile '%s' doesn't exist", profile)
		}

		if shared.StringInSlice(profile, checkedProfiles) {
			return fmt.Errorf("Duplicate profile found in request")
		}

		checkedProfiles = append(checkedProfiles, profile)
	}

	// Validate the new architecture
	if args.Architecture != 0 {
		_, err = osarch.ArchitectureName(args.Architecture)
		if err != nil {
			return fmt.Errorf("Invalid architecture id: %s", err)
		}
	}

	// Get a copy of the old configuration
	oldDescription := d.Description()
	oldArchitecture := 0
	err = shared.DeepCopy(&d.architecture, &oldArchitecture)
	if err != nil {
		return err
	}

	oldEphemeral := false
	err = shared.DeepCopy(&d.ephemeral, &oldEphemeral)
	if err != nil {
		return err
	}

	oldExpandedDevices := deviceConfig.Devices{}
	err = shared.DeepCopy(&d.expandedDevices, &oldExpandedDevices)
	if err != nil {
		return err
	}

	oldExpandedConfig := map[string]string{}
	err = shared.DeepCopy(&d.expandedConfig, &oldExpandedConfig)
	if err != nil {
		return err
	}

	oldLocalDevices := deviceConfig.Devices{}
	err = shared.DeepCopy(&d.localDevices, &oldLocalDevices)
	if err != nil {
		return err
	}

	oldLocalConfig := map[string]string{}
	err = shared.DeepCopy(&d.localConfig, &oldLocalConfig)
	if err != nil {
		return err
	}

	oldProfiles := []string{}
	err = shared.DeepCopy(&d.profiles, &oldProfiles)
	if err != nil {
		return err
	}

	oldExpiryDate := d.expiryDate

	// Define a function which reverts everything. Defer this function
	// so that it doesn't need to be explicitly called in every failing
	// return path. Track whether or not we want to undo the changes
	// using a closure.
	undoChanges := true
	defer func() {
		if undoChanges {
			d.description = oldDescription
			d.architecture = oldArchitecture
			d.ephemeral = oldEphemeral
			d.expandedConfig = oldExpandedConfig
			d.expandedDevices = oldExpandedDevices
			d.localConfig = oldLocalConfig
			d.localDevices = oldLocalDevices
			d.profiles = oldProfiles
			d.expiryDate = oldExpiryDate

			// Force a fresh liblxc handle built from the restored config.
			if d.c != nil {
				d.c.Release()
				d.c = nil
			}
			d.cConfig = false
			d.initLXC(true)
			cgroup.TaskSchedulerTrigger("container", d.name, "changed")
		}
	}()

	// Apply the various changes
	d.description = args.Description
	d.architecture = args.Architecture
	d.ephemeral = args.Ephemeral
	d.localConfig = args.Config
	d.localDevices = args.Devices
	d.profiles = args.Profiles
	d.expiryDate = args.ExpiryDate

	// Expand the config and refresh the LXC config
	err = d.expandConfig(nil)
	if err != nil {
		return errors.Wrap(err, "Expand config")
	}

	err = d.expandDevices(nil)
	if err != nil {
		return errors.Wrap(err, "Expand devices")
	}

	// Diff the configurations: collect keys changed in either direction.
	changedConfig := []string{}
	for key := range oldExpandedConfig {
		if oldExpandedConfig[key] != d.expandedConfig[key] {
			if !shared.StringInSlice(key, changedConfig) {
				changedConfig = append(changedConfig, key)
			}
		}
	}

	for key := range d.expandedConfig {
		if oldExpandedConfig[key] != d.expandedConfig[key] {
			if !shared.StringInSlice(key, changedConfig) {
				changedConfig = append(changedConfig, key)
			}
		}
	}

	// Diff the devices
	removeDevices, addDevices, updateDevices, allUpdatedKeys := oldExpandedDevices.Update(d.expandedDevices, func(oldDevice deviceConfig.Device, newDevice deviceConfig.Device) []string {
		// This function needs to return a list of fields that are excluded from differences
		// between oldDevice and newDevice. The result of this is that as long as the
		// devices are otherwise identical except for the fields returned here, then the
		// device is considered to be being "updated" rather than "added & removed".
		oldDevType, err := device.LoadByType(d.state, d.Project(), oldDevice)
		if err != nil {
			return []string{} // Couldn't create Device, so this cannot be an update.
		}

		newDevType, err := device.LoadByType(d.state, d.Project(), newDevice)
		if err != nil {
			return []string{} // Couldn't create Device, so this cannot be an update.
		}

		return newDevType.UpdatableFields(oldDevType)
	})

	if userRequested {
		// Do some validation of the config diff
		err = instance.ValidConfig(d.state.OS, d.expandedConfig, false, true)
		if err != nil {
			return errors.Wrap(err, "Invalid expanded config")
		}

		// Do full expanded validation of the devices diff.
		err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
		if err != nil {
			return errors.Wrap(err, "Invalid expanded devices")
		}
	}

	// Run through initLXC to catch anything we missed
	if userRequested {
		if d.c != nil {
			d.c.Release()
			d.c = nil
		}

		d.cConfig = false
		err = d.initLXC(true)
		if err != nil {
			return errors.Wrap(err, "Initialize LXC")
		}
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// If apparmor changed, re-validate the apparmor profile
	if shared.StringInSlice("raw.apparmor", changedConfig) || shared.StringInSlice("security.nesting", changedConfig) {
		err = apparmor.InstanceParse(d.state, d)
		if err != nil {
			return errors.Wrap(err, "Parse AppArmor profile")
		}
	}

	if shared.StringInSlice("security.idmap.isolated", changedConfig) || shared.StringInSlice("security.idmap.base", changedConfig) || shared.StringInSlice("security.idmap.size", changedConfig) || shared.StringInSlice("raw.idmap", changedConfig) || shared.StringInSlice("security.privileged", changedConfig) {
		var idmap *idmap.IdmapSet
		base := int64(0)
		if !d.IsPrivileged() {
			// update the idmap
			idmap, base, err = findIdmap(
				d.state,
				d.Name(),
				d.expandedConfig["security.idmap.isolated"],
				d.expandedConfig["security.idmap.base"],
				d.expandedConfig["security.idmap.size"],
				d.expandedConfig["raw.idmap"],
			)
			if err != nil {
				return errors.Wrap(err, "Failed to get ID map")
			}
		}

		var jsonIdmap string
		if idmap != nil {
			idmapBytes, err := json.Marshal(idmap.Idmap)
			if err != nil {
				return err
			}
			jsonIdmap = string(idmapBytes)
		} else {
			jsonIdmap = "[]"
		}

		d.localConfig["volatile.idmap.next"] = jsonIdmap
		d.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)

		// Invalidate the idmap cache
		d.idmapset = nil
	}

	isRunning := d.IsRunning()

	// Use the device interface to apply update changes.
	err = d.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices, isRunning, userRequested)
	if err != nil {
		return err
	}

	// Update MAAS (must run after the MAC addresses have been generated).
	updateMAAS := false
	for _, key := range []string{"maas.subnet.ipv4", "maas.subnet.ipv6", "ipv4.address", "ipv6.address"} {
		if shared.StringInSlice(key, allUpdatedKeys) {
			updateMAAS = true
			break
		}
	}

	if !d.IsSnapshot() && updateMAAS {
		err = d.maasUpdate(oldExpandedDevices.CloneNative())
		if err != nil {
			return err
		}
	}

	// Apply the live changes
	if isRunning {
		// Live update the container config
		for _, key := range changedConfig {
			value := d.expandedConfig[key]

			if key == "raw.apparmor" || key == "security.nesting" {
				// Update the AppArmor profile
				err = apparmor.InstanceLoad(d.state, d)
				if err != nil {
					return err
				}
			} else if key == "security.devlxd" {
				if value == "" || shared.IsTrue(value) {
					err = d.insertMount(shared.VarPath("devlxd"), "/dev/lxd", "none", unix.MS_BIND, false)
					if err != nil {
						return err
					}
				} else if d.FileExists("/dev/lxd") == nil {
					err = d.removeMount("/dev/lxd")
					if err != nil {
						return err
					}

					err = d.FileRemove("/dev/lxd")
					if err != nil {
						return err
					}
				}
			} else if key == "linux.kernel_modules" && value != "" {
				for _, module := range strings.Split(value, ",") {
					module = strings.TrimPrefix(module, " ")
					err := util.LoadModule(module)
					if err != nil {
						return fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
					}
				}
			} else if key == "limits.disk.priority" {
				if !d.state.OS.CGInfo.Supports(cgroup.Blkio, cg) {
					continue
				}

				priorityInt := 5
				diskPriority := d.expandedConfig["limits.disk.priority"]
				if diskPriority != "" {
					priorityInt, err = strconv.Atoi(diskPriority)
					if err != nil {
						return err
					}
				}

				// Minimum valid value is 10
				priority := int64(priorityInt * 100)
				if priority == 0 {
					priority = 10
				}

				// Fix: the error from SetBlkioWeight was previously dropped
				// (the result was never assigned before the nil check below).
				err = cg.SetBlkioWeight(priority)
				if err != nil {
					return err
				}
			} else if key == "limits.memory" || strings.HasPrefix(key, "limits.memory.") {
				// Skip if no memory CGroup
				if !d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
					continue
				}

				// Set the new memory limit
				memory := d.expandedConfig["limits.memory"]
				memoryEnforce := d.expandedConfig["limits.memory.enforce"]
				memorySwap := d.expandedConfig["limits.memory.swap"]
				var memoryInt int64

				// Parse memory: empty means "no limit", a trailing % is
				// relative to total system memory, otherwise a byte size.
				if memory == "" {
					memoryInt = -1
				} else if strings.HasSuffix(memory, "%") {
					percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
					if err != nil {
						return err
					}

					memoryTotal, err := shared.DeviceTotalMemory()
					if err != nil {
						return err
					}

					memoryInt = int64((memoryTotal / 100) * percent)
				} else {
					memoryInt, err = units.ParseByteSizeString(memory)
					if err != nil {
						return err
					}
				}

				// Store the old values for revert
				oldMemswLimit := int64(-1)
				if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
					oldMemswLimit, err = cg.GetMemorySwapLimit()
					if err != nil {
						oldMemswLimit = -1
					}
				}

				oldLimit, err := cg.GetMemoryLimit()
				if err != nil {
					oldLimit = -1
				}

				oldSoftLimit, err := cg.GetMemorySoftLimit()
				if err != nil {
					oldSoftLimit = -1
				}

				// Best-effort restore of the pre-change limits if any of the
				// writes below fails part-way through.
				revertMemory := func() {
					if oldSoftLimit != -1 {
						cg.SetMemorySoftLimit(oldSoftLimit)
					}

					if oldLimit != -1 {
						cg.SetMemoryLimit(oldLimit)
					}

					if oldMemswLimit != -1 {
						cg.SetMemorySwapLimit(oldMemswLimit)
					}
				}

				// Reset everything
				if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
					err = cg.SetMemorySwapLimit(-1)
					if err != nil {
						revertMemory()
						return err
					}
				}

				err = cg.SetMemoryLimit(-1)
				if err != nil {
					revertMemory()
					return err
				}

				err = cg.SetMemorySoftLimit(-1)
				if err != nil {
					revertMemory()
					return err
				}

				// Set the new values
				if memoryEnforce == "soft" {
					// Set new limit
					err = cg.SetMemorySoftLimit(memoryInt)
					if err != nil {
						revertMemory()
						return err
					}
				} else {
					if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
						err = cg.SetMemoryLimit(memoryInt)
						if err != nil {
							revertMemory()
							return err
						}

						err = cg.SetMemorySwapLimit(0)
						if err != nil {
							revertMemory()
							return err
						}
					} else {
						err = cg.SetMemoryLimit(memoryInt)
						if err != nil {
							revertMemory()
							return err
						}
					}

					// Set soft limit to value 10% less than hard limit
					err = cg.SetMemorySoftLimit(int64(float64(memoryInt) * 0.9))
					if err != nil {
						revertMemory()
						return err
					}
				}

				if !d.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
					continue
				}

				// Configure the swappiness
				if key == "limits.memory.swap" || key == "limits.memory.swap.priority" {
					memorySwap := d.expandedConfig["limits.memory.swap"]
					memorySwapPriority := d.expandedConfig["limits.memory.swap.priority"]
					if memorySwap != "" && !shared.IsTrue(memorySwap) {
						err = cg.SetMemorySwappiness(0)
						if err != nil {
							return err
						}
					} else {
						priority := 0
						if memorySwapPriority != "" {
							priority, err = strconv.Atoi(memorySwapPriority)
							if err != nil {
								return err
							}
						}

						err = cg.SetMemorySwappiness(int64(60 - 10 + priority))
						if err != nil {
							return err
						}
					}
				}
			} else if key == "limits.network.priority" {
				err := d.setNetworkPriority()
				if err != nil {
					return err
				}
			} else if key == "limits.cpu" {
				// Trigger a scheduler re-run
				cgroup.TaskSchedulerTrigger("container", d.name, "changed")
			} else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" {
				// Skip if no cpu CGroup
				if !d.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
					continue
				}

				// Apply new CPU limits
				cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(d.expandedConfig["limits.cpu.allowance"], d.expandedConfig["limits.cpu.priority"])
				if err != nil {
					return err
				}

				err = cg.SetCPUShare(cpuShares)
				if err != nil {
					return err
				}

				err = cg.SetCPUCfsLimit(cpuCfsPeriod, cpuCfsQuota)
				if err != nil {
					return err
				}
			} else if key == "limits.processes" {
				if !d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
					continue
				}

				if value == "" {
					err = cg.SetMaxProcesses(-1)
					if err != nil {
						return err
					}
				} else {
					valueInt, err := strconv.ParseInt(value, 10, 64)
					if err != nil {
						return err
					}

					err = cg.SetMaxProcesses(valueInt)
					if err != nil {
						return err
					}
				}
			} else if strings.HasPrefix(key, "limits.hugepages.") {
				if !d.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
					continue
				}

				pageType := ""

				switch key {
				case "limits.hugepages.64KB":
					pageType = "64KB"
				case "limits.hugepages.1MB":
					pageType = "1MB"
				case "limits.hugepages.2MB":
					pageType = "2MB"
				case "limits.hugepages.1GB":
					pageType = "1GB"
				}

				valueInt := int64(-1)
				if value != "" {
					valueInt, err = units.ParseByteSizeString(value)
					if err != nil {
						return err
					}
				}

				err = cg.SetHugepagesLimit(pageType, valueInt)
				if err != nil {
					return err
				}
			}
		}
	}

	// Finally, apply the changes to the database
	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		// Snapshots should update only their descriptions and expiry date.
		if d.IsSnapshot() {
			return tx.UpdateInstanceSnapshot(d.id, d.description, d.expiryDate)
		}

		object, err := tx.GetInstance(d.project, d.name)
		if err != nil {
			return err
		}

		object.Description = d.description
		object.Architecture = d.architecture
		object.Ephemeral = d.ephemeral
		object.ExpiryDate = d.expiryDate
		object.Config = d.localConfig
		object.Profiles = d.profiles
		object.Devices = d.localDevices.CloneNative()

		return tx.UpdateInstance(d.project, d.name, *object)
	})
	if err != nil {
		return errors.Wrap(err, "Failed to update database")
	}

	err = d.UpdateBackupFile()
	if err != nil && !os.IsNotExist(err) {
		return errors.Wrap(err, "Failed to write backup file")
	}

	// Send devlxd notifications
	if isRunning {
		// Config changes (only for user.* keys)
		for _, key := range changedConfig {
			if !strings.HasPrefix(key, "user.") {
				continue
			}

			msg := map[string]string{
				"key":       key,
				"old_value": oldExpandedConfig[key],
				"value":     d.expandedConfig[key],
			}

			err = d.devlxdEventSend("config", msg)
			if err != nil {
				return err
			}
		}

		// Device changes
		for k, m := range removeDevices {
			msg := map[string]interface{}{
				"action": "removed",
				"name":   k,
				"config": m,
			}

			err = d.devlxdEventSend("device", msg)
			if err != nil {
				return err
			}
		}

		for k, m := range updateDevices {
			msg := map[string]interface{}{
				"action": "updated",
				"name":   k,
				"config": m,
			}

			err = d.devlxdEventSend("device", msg)
			if err != nil {
				return err
			}
		}

		for k, m := range addDevices {
			msg := map[string]interface{}{
				"action": "added",
				"name":   k,
				"config": m,
			}

			err = d.devlxdEventSend("device", msg)
			if err != nil {
				return err
			}
		}
	}

	// Success, update the closure to mark that the changes should be kept.
	undoChanges = false

	if userRequested {
		d.lifecycle("updated", nil)
	}

	return nil
}
// updateDevices applies the device changes (removals, additions and in-place
// updates) produced by a config update, honouring device ordering rules.
func (d *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices, instanceRunning bool, userRequested bool) error {
	// Remove devices in reverse order to how they were added.
	for _, entry := range removeDevices.Reversed() {
		if instanceRunning {
			err := d.deviceStop(entry.Name, entry.Config, instanceRunning, "")
			if err == device.ErrUnsupportedDevType {
				continue // No point in trying to remove device below.
			}

			if err != nil {
				return errors.Wrapf(err, "Failed to stop device %q", entry.Name)
			}
		}

		if err := d.deviceRemove(entry.Name, entry.Config, instanceRunning); err != nil && err != device.ErrUnsupportedDevType {
			return errors.Wrapf(err, "Failed to remove device %q", entry.Name)
		}

		// Check whether we are about to add the same device back with updated config and
		// if not, or if the device type has changed, then remove all volatile keys for
		// this device (as its an actual removal or a device type change).
		if err := d.deviceResetVolatile(entry.Name, entry.Config, addDevices[entry.Name]); err != nil {
			return errors.Wrapf(err, "Failed to reset volatile data for device %q", entry.Name)
		}
	}

	// Add devices in sorted order, this ensures that device mounts are added in path order.
	for _, entry := range addDevices.Sorted() {
		err := d.deviceAdd(entry.Name, entry.Config, instanceRunning)
		switch {
		case err == device.ErrUnsupportedDevType:
			continue // No point in trying to start device below.
		case err != nil && userRequested:
			return errors.Wrapf(err, "Failed to add device %q", entry.Name)
		case err != nil:
			// If update is non-user requested (i.e from a snapshot restore), there's nothing we can
			// do to fix the config and we don't want to prevent the snapshot restore so log and allow.
			d.logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"device": entry.Name, "err": err})
			continue
		}

		if instanceRunning {
			if _, err := d.deviceStart(entry.Name, entry.Config, instanceRunning); err != nil && err != device.ErrUnsupportedDevType {
				return errors.Wrapf(err, "Failed to start device %q", entry.Name)
			}
		}
	}

	// Apply in-place updates in sorted (path) order.
	for _, entry := range updateDevices.Sorted() {
		if err := d.deviceUpdate(entry.Name, entry.Config, oldExpandedDevices, instanceRunning); err != nil && err != device.ErrUnsupportedDevType {
			return errors.Wrapf(err, "Failed to update device %q", entry.Name)
		}
	}

	return nil
}
// Export backs up the instance.
//
// The instance must be stopped. A tarball is streamed to w containing
// metadata.yaml (generated on the fly when missing, or regenerated when a
// properties override is supplied), the rootfs, and any templates. The
// returned ImageMetadata mirrors what was written to metadata.yaml.
func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	meta := api.ImageMetadata{}

	if d.IsRunning() {
		return meta, fmt.Errorf("Cannot export a running instance as an image")
	}

	d.logger.Info("Exporting instance", ctxMap)

	// Start the storage.
	_, err := d.mount()
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}
	defer d.unmount()

	// Get IDMap to unshift container as the tarball is created.
	idmap, err := d.DiskIdmap()
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}

	// Create the tarball.
	tarWriter := instancewriter.NewInstanceTarWriter(w, idmap)

	// Keep track of the first path we saw for each path with nlink>1.
	cDir := d.Path()

	// Path inside the tar image is the pathname starting after cDir.
	offset := len(cDir) + 1

	// writeToTar is the filepath.Walk callback: it adds each file to the
	// tarball under its path relative to the instance directory.
	writeToTar := func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		err = tarWriter.WriteFile(path[offset:], path, fi, false)
		if err != nil {
			d.logger.Debug("Error tarring up", log.Ctx{"path": path, "err": err})
			return err
		}

		return nil
	}

	// Look for metadata.yaml.
	fnam := filepath.Join(cDir, "metadata.yaml")
	if !shared.PathExists(fnam) {
		// Generate a new metadata.yaml.
		tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
		defer os.RemoveAll(tempDir)

		// Get the instance's architecture. Snapshots report the parent
		// instance's architecture.
		var arch string
		if d.IsSnapshot() {
			parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)
			parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}

			arch, _ = osarch.ArchitectureName(parent.Architecture())
		} else {
			arch, _ = osarch.ArchitectureName(d.architecture)
		}

		// Fall back to the host's primary architecture.
		if arch == "" {
			arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
			if err != nil {
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}
		}

		// Fill in the metadata.
		meta.Architecture = arch
		meta.CreationDate = time.Now().UTC().Unix()
		meta.Properties = properties

		data, err := yaml.Marshal(&meta)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		// Write the actual file.
		fnam = filepath.Join(tempDir, "metadata.yaml")
		err = ioutil.WriteFile(fnam, data, 0644)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		fi, err := os.Lstat(fnam)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		// The generated file lives in tempDir, so its in-tar name is computed
		// relative to tempDir rather than the instance directory.
		tmpOffset := len(path.Dir(fnam)) + 1
		if err := tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false); err != nil {
			tarWriter.Close()
			d.logger.Debug("Error writing to tarfile", log.Ctx{"err": err})
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
	} else {
		// Parse the metadata.
		content, err := ioutil.ReadFile(fnam)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		err = yaml.Unmarshal(content, &meta)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		// A properties override replaces the ones stored in the instance's
		// own metadata.yaml, so regenerate the file in a temp dir.
		if properties != nil {
			meta.Properties = properties

			// Generate a new metadata.yaml.
			tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}
			defer os.RemoveAll(tempDir)

			data, err := yaml.Marshal(&meta)
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}

			// Write the actual file.
			fnam = filepath.Join(tempDir, "metadata.yaml")
			err = ioutil.WriteFile(fnam, data, 0644)
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}
		}

		// Include metadata.yaml in the tarball.
		fi, err := os.Lstat(fnam)
		if err != nil {
			tarWriter.Close()
			d.logger.Debug("Error statting during export", log.Ctx{"fileName": fnam})
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		// When properties were overridden, fnam points at the regenerated
		// temp-dir copy, so use the temp-dir-relative in-tar name.
		if properties != nil {
			tmpOffset := len(path.Dir(fnam)) + 1
			err = tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false)
		} else {
			err = tarWriter.WriteFile(fnam[offset:], fnam, fi, false)
		}
		if err != nil {
			tarWriter.Close()
			d.logger.Debug("Error writing to tarfile", log.Ctx{"err": err})
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
	}

	// Include all the rootfs files.
	fnam = d.RootfsPath()
	err = filepath.Walk(fnam, writeToTar)
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}

	// Include all the templates.
	fnam = d.TemplatesPath()
	if shared.PathExists(fnam) {
		err = filepath.Walk(fnam, writeToTar)
		if err != nil {
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
	}

	err = tarWriter.Close()
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}

	d.logger.Info("Exported instance", ctxMap)
	return meta, nil
}
// collectCRIULogFile copies the CRIU log for the given method into the
// instance's log directory under a timestamped "<function>_<method>_<time>.log" name.
func collectCRIULogFile(d instance.Instance, imagesDir string, function string, method string) error {
	timestamp := time.Now().Format(time.RFC3339)
	src := filepath.Join(imagesDir, fmt.Sprintf("%s.log", method))
	dst := filepath.Join(d.LogPath(), fmt.Sprintf("%s_%s_%s.log", function, method, timestamp))

	return shared.FileCopy(src, dst)
}
func getCRIULogErrors(imagesDir string, method string) (string, error) {
f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method)))
if err != nil {
return "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
ret := []string{}
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "Error") || strings.Contains(line, "Warn") {
ret = append(ret, scanner.Text())
}
}
return strings.Join(ret, "\n"), nil
}
// Migrate migrates the instance to another node.
//
// This drives CRIU via liblxc: depending on args.Cmd it performs a pre-dump,
// dump, restore or feature check against args.StateDir. It requires the criu
// binary to be present on the host. On failure the CRIU log's error/warning
// lines are folded into the returned error.
func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
	ctxMap := log.Ctx{
		"created":      d.creationDate,
		"ephemeral":    d.ephemeral,
		"used":         d.lastUsedDate,
		"statedir":     args.StateDir,
		"actionscript": args.ActionScript,
		"predumpdir":   args.PreDumpDir,
		"features":     args.Features,
		"stop":         args.Stop}

	_, err := exec.LookPath("criu")
	if err != nil {
		return fmt.Errorf("Unable to perform container live migration. CRIU isn't installed")
	}

	d.logger.Info("Migrating container", ctxMap)

	// Human-readable name for the CRIU operation; used in log/error messages
	// and as the CRIU log file basename.
	prettyCmd := ""
	switch args.Cmd {
	case liblxc.MIGRATE_PRE_DUMP:
		prettyCmd = "pre-dump"
	case liblxc.MIGRATE_DUMP:
		prettyCmd = "dump"
	case liblxc.MIGRATE_RESTORE:
		prettyCmd = "restore"
	case liblxc.MIGRATE_FEATURE_CHECK:
		prettyCmd = "feature-check"
	default:
		prettyCmd = "unknown"
		d.logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.Cmd})
	}

	pool, err := d.getStoragePool()
	if err != nil {
		return err
	}

	preservesInodes := pool.Driver().Info().PreservesInodes

	/* This feature was only added in 2.0.1, let's not ask for it
	 * before then or migrations will fail.
	 */
	if !util.RuntimeLiblxcVersionAtLeast(2, 0, 1) {
		preservesInodes = false
	}

	finalStateDir := args.StateDir
	var migrateErr error

	/* For restore, we need an extra fork so that we daemonize monitor
	 * instead of having it be a child of LXD, so let's hijack the command
	 * here and do the extra fork.
	 */
	if args.Cmd == liblxc.MIGRATE_RESTORE {
		// Run the shared start
		_, postStartHooks, err := d.startCommon()
		if err != nil {
			return errors.Wrap(err, "Failed preparing container for start")
		}

		/*
		 * For unprivileged containers we need to shift the
		 * perms on the images images so that they can be
		 * opened by the process after it is in its user
		 * namespace.
		 */
		idmapset, err := d.CurrentIdmap()
		if err != nil {
			return err
		}

		if idmapset != nil {
			storageType, err := d.getStorageType()
			if err != nil {
				return errors.Wrap(err, "Storage type")
			}

			// ZFS and btrfs get dedicated shifting paths; everything else
			// uses the generic rootfs shift.
			if storageType == "zfs" {
				err = idmapset.ShiftRootfs(args.StateDir, storageDrivers.ShiftZFSSkipper)
			} else if storageType == "btrfs" {
				err = storageDrivers.ShiftBtrfsRootfs(args.StateDir, idmapset)
			} else {
				err = idmapset.ShiftRootfs(args.StateDir, nil)
			}
			if err != nil {
				return err
			}
		}

		configPath := filepath.Join(d.LogPath(), "lxc.conf")

		if args.DumpDir != "" {
			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
		}

		// forkmigrate performs the daemonizing fork described above.
		_, migrateErr = shared.RunCommand(
			d.state.OS.ExecPath,
			"forkmigrate",
			d.name,
			d.state.OS.LxcPath,
			configPath,
			finalStateDir,
			fmt.Sprintf("%v", preservesInodes))

		if migrateErr == nil {
			// Run any post start hooks.
			err := d.runHooks(postStartHooks)
			if err != nil {
				// Attempt to stop container.
				d.Stop(false)
				return err
			}
		}
	} else if args.Cmd == liblxc.MIGRATE_FEATURE_CHECK {
		err := d.initLXC(true)
		if err != nil {
			return err
		}

		opts := liblxc.MigrateOptions{
			FeaturesToCheck: args.Features,
		}

		migrateErr = d.c.Migrate(args.Cmd, opts)
		if migrateErr != nil {
			d.logger.Info("CRIU feature check failed", ctxMap)
			return migrateErr
		}

		// Feature check has no state dir to collect logs from; return early.
		return nil
	} else {
		err := d.initLXC(true)
		if err != nil {
			return err
		}

		script := ""
		if args.ActionScript {
			script = filepath.Join(args.StateDir, "action.sh")
		}

		if args.DumpDir != "" {
			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
		}

		// TODO: make this configurable? Ultimately I think we don't
		// want to do that; what we really want to do is have "modes"
		// of criu operation where one is "make this succeed" and the
		// other is "make this fast". Anyway, for now, let's choose a
		// really big size so it almost always succeeds, even if it is
		// slow.
		ghostLimit := uint64(256 * 1024 * 1024)

		opts := liblxc.MigrateOptions{
			Stop:            args.Stop,
			Directory:       finalStateDir,
			Verbose:         true,
			PreservesInodes: preservesInodes,
			ActionScript:    script,
			GhostLimit:      ghostLimit,
		}
		if args.PreDumpDir != "" {
			opts.PredumpDir = fmt.Sprintf("../%s", args.PreDumpDir)
		}

		if !d.IsRunning() {
			// otherwise the migration will needlessly fail
			args.Stop = false
		}

		migrateErr = d.c.Migrate(args.Cmd, opts)
	}

	// Preserve the CRIU log regardless of outcome; failure to copy it is
	// logged but does not mask the migration result.
	collectErr := collectCRIULogFile(d, finalStateDir, args.Function, prettyCmd)
	if collectErr != nil {
		d.logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
	}

	if migrateErr != nil {
		// Surface CRIU's own error/warning lines to make the failure actionable.
		log, err2 := getCRIULogErrors(finalStateDir, prettyCmd)
		if err2 == nil {
			d.logger.Info("Failed migrating container", ctxMap)
			migrateErr = fmt.Errorf("%s %s failed\n%s", args.Function, prettyCmd, log)
		}

		return migrateErr
	}

	d.logger.Info("Migrated container", ctxMap)

	return nil
}
// templateApplyNow runs every image template whose "when" list includes the
// given trigger, rendering the result into the container's rootfs with the
// container's root uid/gid ownership.
func (d *lxc) templateApplyNow(trigger instance.TemplateTrigger) error {
	// If there's no metadata, just return
	fname := filepath.Join(d.Path(), "metadata.yaml")
	if !shared.PathExists(fname) {
		return nil
	}

	// Parse the metadata
	content, err := ioutil.ReadFile(fname)
	if err != nil {
		return errors.Wrap(err, "Failed to read metadata")
	}

	metadata := new(api.ImageMetadata)
	err = yaml.Unmarshal(content, &metadata)
	if err != nil {
		return errors.Wrapf(err, "Could not parse %s", fname)
	}

	// Find rootUID and rootGID
	idmapset, err := d.DiskIdmap()
	if err != nil {
		return errors.Wrap(err, "Failed to set ID map")
	}

	rootUID := int64(0)
	rootGID := int64(0)

	// Get the right uid and gid for the container
	if idmapset != nil {
		rootUID, rootGID = idmapset.ShiftIntoNs(0, 0)
	}

	// Figure out the container architecture, falling back to the host's
	// primary architecture when the stored one can't be resolved.
	arch, err := osarch.ArchitectureName(d.architecture)
	if err != nil {
		arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
		if err != nil {
			return errors.Wrap(err, "Failed to detect system architecture")
		}
	}

	// Generate the container metadata exposed to templates.
	containerMeta := make(map[string]string)
	containerMeta["name"] = d.name
	containerMeta["type"] = "container"
	containerMeta["architecture"] = arch

	if d.ephemeral {
		containerMeta["ephemeral"] = "true"
	} else {
		containerMeta["ephemeral"] = "false"
	}

	if d.IsPrivileged() {
		containerMeta["privileged"] = "true"
	} else {
		containerMeta["privileged"] = "false"
	}

	// Go through the templates. Each one runs in its own closure so the
	// deferred w.Close() fires per iteration instead of at function exit.
	for tplPath, tpl := range metadata.Templates {
		err = func(tplPath string, tpl *api.ImageMetadataTemplate) error {
			var w *os.File

			// Check if the template should be applied now
			found := false
			for _, tplTrigger := range tpl.When {
				if tplTrigger == string(trigger) {
					found = true
					break
				}
			}

			if !found {
				return nil
			}

			// Open the file to template, create if needed
			fullpath := filepath.Join(d.RootfsPath(), strings.TrimLeft(tplPath, "/"))
			if shared.PathExists(fullpath) {
				if tpl.CreateOnly {
					return nil
				}

				// Open the existing file (truncating it).
				// Note: assignments to w and err here intentionally write the
				// closure's captured outer variables.
				w, err = os.Create(fullpath)
				if err != nil {
					return errors.Wrap(err, "Failed to create template file")
				}
			} else {
				// Create the directories leading to the file
				shared.MkdirAllOwner(path.Dir(fullpath), 0755, int(rootUID), int(rootGID))

				// Create the file itself
				w, err = os.Create(fullpath)
				if err != nil {
					return err
				}

				// Fix ownership and mode
				w.Chown(int(rootUID), int(rootGID))
				w.Chmod(0644)
			}
			defer w.Close()

			// Read the template
			tplString, err := ioutil.ReadFile(filepath.Join(d.TemplatesPath(), tpl.Template))
			if err != nil {
				return errors.Wrap(err, "Failed to read template file")
			}

			// Restrict filesystem access to within the container's rootfs
			tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", d.name, tpl.Template), template.ChrootLoader{Path: d.RootfsPath()})

			tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
			if err != nil {
				return errors.Wrap(err, "Failed to render template")
			}

			// config_get template helper: look up an expanded config key with
			// a caller-supplied default; trailing newlines are stripped.
			configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
				val, ok := d.expandedConfig[confKey.String()]
				if !ok {
					return confDefault
				}

				return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
			}

			// Render the template
			// NOTE(review): the error from ExecuteWriter is discarded, so a
			// failed render can leave a partially written file — confirm this
			// best-effort behavior is intended.
			tplRender.ExecuteWriter(pongo2.Context{"trigger": trigger,
				"path":       tplPath,
				"container":  containerMeta,
				"instance":   containerMeta,
				"config":     d.expandedConfig,
				"devices":    d.expandedDevices,
				"properties": tpl.Properties,
				"config_get": configGet}, w)

			return nil
		}(tplPath, tpl)
		if err != nil {
			return err
		}
	}

	return nil
}
// inheritInitPidFd returns the fd number and open file for the instance's
// init pidfd, or (-1, nil) when pidfds are unavailable or cannot be opened.
func (d *lxc) inheritInitPidFd() (int, *os.File) {
	if !d.state.OS.PidFds {
		return -1, nil
	}

	pidFdFile, err := d.InitPidFd()
	if err != nil {
		return -1, nil
	}

	// The caller passes the file to the forked helper as fd 3.
	return 3, pidFdFile
}
// FileExists returns whether file exists inside instance.
//
// It shells out to the "forkfile exists" helper and parses its stderr for an
// "error:" line; a nil return means the file exists.
func (d *lxc) FileExists(path string) error {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	// Setup container storage if needed
	_, err := d.mount()
	if err != nil {
		return err
	}
	defer d.unmount()

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Check if the file exists in the container
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"exists",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		path,
	)

	// Process forkcheckfile response
	if stderr != "" {
		if strings.HasPrefix(stderr, "error:") {
			// Fix: use an explicit "%s" format string so a '%' in the helper's
			// message can't be misinterpreted as a formatting directive
			// (go vet printf: non-constant format string).
			return fmt.Errorf("%s", strings.TrimPrefix(strings.TrimSuffix(stderr, "\n"), "error: "))
		}

		for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
			d.logger.Debug("forkcheckfile", log.Ctx{"line": line})
		}
	}

	if err != nil {
		return err
	}

	return nil
}
// FilePull gets a file from the instance.
//
// It invokes the "forkfile pull" helper and parses its line-based stderr
// protocol ("uid:", "gid:", "mode:", "type:", "entry:", "error:", "errno:")
// to recover the file's ownership, mode, type and (for directories) entries.
// Ownership is unshifted out of the container namespace when not running.
func (d *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	// Setup container storage if needed
	_, err := d.mount()
	if err != nil {
		return -1, -1, 0, "", nil, err
	}
	defer d.unmount()

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Get the file from the container
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"pull",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		srcpath,
		dstpath,
	)

	uid := int64(-1)
	gid := int64(-1)
	mode := -1
	fileType := "unknown"
	var dirEnts []string
	var errStr string

	// Process forkgetfile response
	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		if line == "" {
			continue
		}

		// Extract errors (message arrives before the errno line).
		if strings.HasPrefix(line, "error: ") {
			errStr = strings.TrimPrefix(line, "error: ")
			continue
		}

		if strings.HasPrefix(line, "errno: ") {
			errno := strings.TrimPrefix(line, "errno: ")
			if errno == "2" {
				// ENOENT maps to the standard not-exist error.
				return -1, -1, 0, "", nil, os.ErrNotExist
			}

			// Fix: use an explicit "%s" format string so a '%' in the helper's
			// message can't be misinterpreted as a formatting directive
			// (go vet printf: non-constant format string).
			return -1, -1, 0, "", nil, fmt.Errorf("%s", errStr)
		}

		// Extract the uid
		if strings.HasPrefix(line, "uid: ") {
			uid, err = strconv.ParseInt(strings.TrimPrefix(line, "uid: "), 10, 64)
			if err != nil {
				return -1, -1, 0, "", nil, err
			}

			continue
		}

		// Extract the gid
		if strings.HasPrefix(line, "gid: ") {
			gid, err = strconv.ParseInt(strings.TrimPrefix(line, "gid: "), 10, 64)
			if err != nil {
				return -1, -1, 0, "", nil, err
			}

			continue
		}

		// Extract the mode
		if strings.HasPrefix(line, "mode: ") {
			mode, err = strconv.Atoi(strings.TrimPrefix(line, "mode: "))
			if err != nil {
				return -1, -1, 0, "", nil, err
			}

			continue
		}

		if strings.HasPrefix(line, "type: ") {
			fileType = strings.TrimPrefix(line, "type: ")
			continue
		}

		// Directory entries: embedded newlines are transported as NUL bytes.
		if strings.HasPrefix(line, "entry: ") {
			ent := strings.TrimPrefix(line, "entry: ")
			ent = strings.Replace(ent, "\x00", "\n", -1)
			dirEnts = append(dirEnts, ent)
			continue
		}

		d.logger.Debug("forkgetfile", log.Ctx{"line": line})
	}

	if err != nil {
		return -1, -1, 0, "", nil, err
	}

	// Unmap uid and gid if needed
	if !d.IsRunning() {
		idmapset, err := d.DiskIdmap()
		if err != nil {
			return -1, -1, 0, "", nil, err
		}

		if idmapset != nil {
			uid, gid = idmapset.ShiftFromNs(uid, gid)
		}
	}

	return uid, gid, os.FileMode(mode), fileType, dirEnts, nil
}
// FilePush sends a file into the instance.
// The uid/gid/mode are applied to the created file; when the instance is
// stopped they are first shifted into the container's idmap. The "write"
// argument is passed through to the "forkfile push" subcommand.
func (d *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	var rootUID int64
	var rootGID int64
	var errStr string

	// Map uid and gid if needed.
	if !d.IsRunning() {
		idmapset, err := d.DiskIdmap()
		if err != nil {
			return err
		}

		if idmapset != nil {
			uid, gid = idmapset.ShiftIntoNs(uid, gid)
			rootUID, rootGID = idmapset.ShiftIntoNs(0, 0)
		}
	}

	// Setup container storage if needed.
	_, err := d.mount()
	if err != nil {
		return err
	}
	defer d.unmount()

	// Default mode used when the caller doesn't supply one.
	defaultMode := 0640
	if fileType == "directory" {
		defaultMode = 0750
	}

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Push the file to the container.
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"push",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		srcpath,
		dstpath,
		fileType,
		fmt.Sprintf("%d", uid),
		fmt.Sprintf("%d", gid),
		fmt.Sprintf("%d", mode),
		fmt.Sprintf("%d", rootUID),
		fmt.Sprintf("%d", rootGID),
		fmt.Sprintf("%d", int(os.FileMode(defaultMode)&os.ModePerm)),
		write,
	)

	// Process forkgetfile response.
	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		if line == "" {
			continue
		}

		// Extract errors.
		if strings.HasPrefix(line, "error: ") {
			errStr = strings.TrimPrefix(line, "error: ")
			continue
		}

		if strings.HasPrefix(line, "errno: ") {
			errno := strings.TrimPrefix(line, "errno: ")
			if errno == "2" {
				return os.ErrNotExist
			}

			// Use a literal format string so any "%" in the forkfile error
			// message cannot be misinterpreted as a formatting directive.
			return fmt.Errorf("%s", errStr)
		}
	}

	if err != nil {
		return err
	}

	return nil
}
// FileRemove removes a file inside the instance.
// It runs the "forkfile remove" subcommand and translates its stderr
// protocol (error:/errno: lines) into Go errors.
func (d *lxc) FileRemove(path string) error {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	var errStr string

	// Setup container storage if needed.
	_, err := d.mount()
	if err != nil {
		return err
	}
	defer d.unmount()

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Remove the file from the container.
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"remove",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		path,
	)

	// Process forkremovefile response.
	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		if line == "" {
			continue
		}

		// Extract errors.
		if strings.HasPrefix(line, "error: ") {
			errStr = strings.TrimPrefix(line, "error: ")
			continue
		}

		if strings.HasPrefix(line, "errno: ") {
			errno := strings.TrimPrefix(line, "errno: ")
			if errno == "2" {
				return os.ErrNotExist
			}

			// Use a literal format string so any "%" in the forkfile error
			// message cannot be misinterpreted as a formatting directive.
			return fmt.Errorf("%s", errStr)
		}
	}

	if err != nil {
		return err
	}

	return nil
}
// Console attaches to the instance console.
// It spawns a "forkconsole" child wired to a fresh pty and returns the host
// side of that pty plus a channel; sending on (or closing) the channel kills
// the forkconsole process to disconnect.
func (d *lxc) Console(protocol string) (*os.File, chan error, error) {
	// Containers only support the text console protocol.
	if protocol != instance.ConsoleTypeConsole {
		return nil, nil, fmt.Errorf("Container instances don't support %q output", protocol)
	}

	chDisconnect := make(chan error, 1)

	args := []string{
		d.state.OS.ExecPath,
		"forkconsole",
		project.Instance(d.Project(), d.Name()),
		d.state.OS.LxcPath,
		filepath.Join(d.LogPath(), "lxc.conf"),
		"tty=0",
		"escape=-1"}

	idmapset, err := d.CurrentIdmap()
	if err != nil {
		return nil, nil, err
	}

	// Open the pty owned by the container's root UID/GID (when idmapped) so
	// the console device is usable from inside the container.
	var rootUID, rootGID int64
	if idmapset != nil {
		rootUID, rootGID = idmapset.ShiftIntoNs(0, 0)
	}

	ptx, pty, err := shared.OpenPty(rootUID, rootGID)
	if err != nil {
		return nil, nil, err
	}

	// Give forkconsole the client side of the pty for all three streams.
	cmd := exec.Cmd{}
	cmd.Path = d.state.OS.ExecPath
	cmd.Args = args
	cmd.Stdin = pty
	cmd.Stdout = pty
	cmd.Stderr = pty

	err = cmd.Start()
	if err != nil {
		return nil, nil, err
	}

	// Reap the child and tear down both pty ends once forkconsole exits.
	go func() {
		err = cmd.Wait()
		ptx.Close()
		pty.Close()
	}()

	// Kill forkconsole when the caller signals disconnection.
	go func() {
		<-chDisconnect
		cmd.Process.Kill()
	}()

	return ptx, chDisconnect, nil
}
// ConsoleLog retrieves the container's console log via liblxc using the
// supplied options.
func (d *lxc) ConsoleLog(opts liblxc.ConsoleLogOptions) (string, error) {
	logOutput, err := d.c.ConsoleLog(opts)
	if err != nil {
		return "", err
	}

	return string(logOutput), nil
}
// Exec executes a command inside the instance via the "forkexec" subcommand
// and returns a handle to the running command once the in-container PID of
// the attached process is known.
func (d *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
	// Prepare the environment.
	envSlice := []string{}
	for k, v := range req.Environment {
		envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v))
	}

	// Setup logfile. NOTE(review): the file handle is intentionally left
	// open for the lifetime of the forkexec child which writes to it.
	logPath := filepath.Join(d.LogPath(), "forkexec.log")
	logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
	if err != nil {
		return nil, err
	}

	// Prepare the subcommand.
	cname := project.Instance(d.Project(), d.Name())
	args := []string{
		d.state.OS.ExecPath,
		"forkexec",
		cname,
		d.state.OS.LxcPath,
		filepath.Join(d.LogPath(), "lxc.conf"),
		req.Cwd,
		fmt.Sprintf("%d", req.User),
		fmt.Sprintf("%d", req.Group),
	}

	args = append(args, "--")
	args = append(args, "env")
	args = append(args, envSlice...)
	args = append(args, "--")
	args = append(args, "cmd")
	args = append(args, req.Command...)

	cmd := exec.Cmd{}
	cmd.Path = d.state.OS.ExecPath
	cmd.Args = args
	cmd.Stdin = nil
	cmd.Stdout = logFile
	cmd.Stderr = logFile

	// Mitigation for CVE-2019-5736: re-exec liblxc from a memfd when the
	// container could have write access to the host binary.
	useRexec := false
	if d.expandedConfig["raw.idmap"] != "" {
		err := instance.AllowedUnprivilegedOnlyMap(d.expandedConfig["raw.idmap"])
		if err != nil {
			useRexec = true
		}
	}

	if shared.IsTrue(d.expandedConfig["security.privileged"]) {
		useRexec = true
	}

	if useRexec {
		cmd.Env = append(os.Environ(), "LXC_MEMFD_REXEC=1")
	}

	// Setup communication PIPE. Check the error before deferring the close:
	// on failure the returned files are nil and must not be closed.
	rStatus, wStatus, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	defer rStatus.Close()

	cmd.ExtraFiles = []*os.File{stdin, stdout, stderr, wStatus}
	err = cmd.Start()
	wStatus.Close()
	if err != nil {
		return nil, err
	}

	// The child reports the PID of the process attached inside the
	// container over the status pipe.
	attachedPid := shared.ReadPid(rStatus)
	if attachedPid <= 0 {
		cmd.Wait()
		d.logger.Error("Failed to retrieve PID of executing child process")
		return nil, fmt.Errorf("Failed to retrieve PID of executing child process")
	}
	d.logger.Debug("Retrieved PID of executing child process", log.Ctx{"attachedPid": attachedPid})

	instCmd := &lxcCmd{
		cmd:              &cmd,
		attachedChildPid: int(attachedPid),
	}

	return instCmd, nil
}
// cpuState collects CPU accounting usage for the instance state API.
// A zero struct is returned when the cgroup or controller is unavailable;
// a usage of -1 signals a read failure.
func (d *lxc) cpuState() api.InstanceStateCPU {
	cpu := api.InstanceStateCPU{}

	cg, err := d.cgroup(nil)
	if err != nil {
		return cpu
	}

	// Without the cpuacct controller there is nothing to report.
	if !d.state.OS.CGInfo.Supports(cgroup.CPUAcct, cg) {
		return cpu
	}

	usage, err := cg.GetCPUAcctUsage()
	if err != nil {
		cpu.Usage = -1
		return cpu
	}

	cpu.Usage = usage
	return cpu
}
// diskState returns per-device disk usage for the instance state API.
// Only "disk" type devices are considered; devices whose usage cannot be
// determined are skipped (errors are logged, not returned).
func (d *lxc) diskState() map[string]api.InstanceStateDisk {
	disk := map[string]api.InstanceStateDisk{}

	for _, dev := range d.expandedDevices.Sorted() {
		if dev.Config["type"] != "disk" {
			continue
		}

		var usage int64

		if dev.Config["path"] == "/" {
			// Root disk: usage comes from the instance's own storage volume.
			pool, err := storagePools.GetPoolByInstance(d.state, d)
			if err != nil {
				d.logger.Error("Error loading storage pool", log.Ctx{"err": err})
				continue
			}

			usage, err = pool.GetInstanceUsage(d)
			if err != nil {
				// ErrNotSupported is expected from some drivers; stay quiet.
				if err != storageDrivers.ErrNotSupported {
					d.logger.Error("Error getting disk usage", log.Ctx{"err": err})
				}
				continue
			}
		} else if dev.Config["pool"] != "" {
			// Custom volume attached from a named storage pool.
			pool, err := storagePools.GetPoolByName(d.state, dev.Config["pool"])
			if err != nil {
				d.logger.Error("Error loading storage pool", log.Ctx{"poolName": dev.Config["pool"], "err": err})
				continue
			}

			usage, err = pool.GetCustomVolumeUsage(d.Project(), dev.Config["source"])
			if err != nil {
				if err != storageDrivers.ErrNotSupported {
					d.logger.Error("Error getting volume usage", log.Ctx{"volume": dev.Config["source"], "err": err})
				}
				continue
			}
		} else {
			// Other disk devices (plain host paths) have no tracked usage.
			continue
		}

		disk[dev.Name] = api.InstanceStateDisk{Usage: usage}
	}

	return disk
}
// memoryState returns memory and swap usage (current and peak, in bytes)
// for the instance state API. Missing cgroup support yields a zero struct;
// individual read failures leave the corresponding field at zero.
func (d *lxc) memoryState() api.InstanceStateMemory {
	memory := api.InstanceStateMemory{}

	cg, err := d.cgroup(nil)
	if err != nil {
		return memory
	}

	if !d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
		return memory
	}

	// Memory in bytes.
	value, err := cg.GetMemoryUsage()
	if err == nil {
		memory.Usage = value
	}

	// Memory peak in bytes.
	if d.state.OS.CGInfo.Supports(cgroup.MemoryMaxUsage, cg) {
		value, err = cg.GetMemoryMaxUsage()
		if err == nil {
			memory.UsagePeak = value
		}
	}

	if d.state.OS.CGInfo.Supports(cgroup.MemorySwapUsage, cg) {
		// Swap in bytes. Only queried once memory usage itself is known.
		if memory.Usage > 0 {
			value, err := cg.GetMemorySwapUsage()
			if err == nil {
				memory.SwapUsage = value
			}
		}

		// Swap peak in bytes. Only queried once a memory peak is known.
		if memory.UsagePeak > 0 {
			value, err = cg.GetMemorySwapMaxUsage()
			if err == nil {
				memory.SwapUsagePeak = value
			}
		}
	}

	return memory
}
// networkState returns per-interface network state for the instance.
// It prefers retrieving the data via netlink (netns_getifaddrs) and falls
// back to the "forknet info" subcommand when that fails or is unsupported.
func (d *lxc) networkState() map[string]api.InstanceStateNetwork {
	result := map[string]api.InstanceStateNetwork{}

	pid := d.InitPID()
	if pid < 1 {
		// Instance isn't running; nothing to report.
		return result
	}

	couldUseNetnsGetifaddrs := d.state.OS.NetnsGetifaddrs
	if couldUseNetnsGetifaddrs {
		nw, err := netutils.NetnsGetifaddrs(int32(pid))
		if err != nil {
			couldUseNetnsGetifaddrs = false
			d.logger.Error("Failed to retrieve network information via netlink", log.Ctx{"pid": pid})
		} else {
			result = nw
		}
	}

	if !couldUseNetnsGetifaddrs {
		pidFdNr, pidFd := d.inheritInitPidFd()
		if pidFdNr >= 0 {
			defer pidFd.Close()
		}

		// Get the network state from the container.
		out, _, err := shared.RunCommandSplit(
			nil,
			[]*os.File{pidFd},
			d.state.OS.ExecPath,
			"forknet",
			"info",
			"--",
			fmt.Sprintf("%d", pid),
			fmt.Sprintf("%d", pidFdNr))

		// Process forkgetnet response.
		if err != nil {
			d.logger.Error("Error calling 'lxd forknet", log.Ctx{"err": err, "pid": pid})
			return result
		}

		// If we can use netns_getifaddrs() but it failed and the setns() +
		// netns_getifaddrs() succeeded we should just always fallback to the
		// setns() + netns_getifaddrs() style retrieval.
		d.state.OS.NetnsGetifaddrs = false

		// forknet prints the interface map as JSON on stdout.
		nw := map[string]api.InstanceStateNetwork{}
		err = json.Unmarshal([]byte(out), &nw)
		if err != nil {
			d.logger.Error("Failure to read forknet json", log.Ctx{"err": err})
			return result
		}
		result = nw
	}

	// Get host_name from volatile data if not set already.
	for name, dev := range result {
		if dev.HostName == "" {
			dev.HostName = d.localConfig[fmt.Sprintf("volatile.%s.host_name", name)]
			result[name] = dev
		}
	}

	return result
}
// processesState returns the number of processes in the container, using
// the pids cgroup controller when available and otherwise walking
// /proc/<pid>/task/<pid>/children starting from init.
func (d *lxc) processesState() int64 {
	// Return 0 if not running.
	pid := d.InitPID()
	if pid == -1 {
		return 0
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return 0
	}

	// Fast path: ask the pids cgroup controller directly.
	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
		count, err := cg.GetProcessesUsage()
		if err != nil {
			return -1
		}

		return count
	}

	// Fallback: breadth-first walk of the process tree, appending children
	// as they are discovered so every descendant gets visited.
	pids := []int64{int64(pid)}
	for i := 0; i < len(pids); i++ {
		fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i])
		fcont, err := ioutil.ReadFile(fname)
		if err != nil {
			// The process terminated during execution of this loop.
			continue
		}

		for _, field := range strings.Split(string(fcont), " ") {
			childPid, err := strconv.ParseInt(field, 10, 64)
			if err == nil {
				pids = append(pids, childPid)
			}
		}
	}

	return int64(len(pids))
}
// getStoragePool returns the current storage pool handle. To avoid a DB
// lookup each time this function is called, the handle is cached internally
// in the lxc struct.
func (d *lxc) getStoragePool() (storagePools.Pool, error) {
	if d.storagePool == nil {
		pool, err := storagePools.GetPoolByInstance(d.state, d)
		if err != nil {
			return nil, err
		}

		d.storagePool = pool
	}

	return d.storagePool, nil
}
// getStorageType returns the name of the storage driver backing the
// instance's pool.
func (d *lxc) getStorageType() (string, error) {
	storagePool, err := d.getStoragePool()
	if err != nil {
		return "", err
	}

	return storagePool.Driver().Info().Name, nil
}
// mount mounts the instance's rootfs volume if needed and returns the
// resulting mount info, using the snapshot variant for snapshots.
func (d *lxc) mount() (*storagePools.MountInfo, error) {
	pool, err := d.getStoragePool()
	if err != nil {
		return nil, err
	}

	var mountInfo *storagePools.MountInfo
	if d.IsSnapshot() {
		mountInfo, err = pool.MountInstanceSnapshot(d, nil)
	} else {
		mountInfo, err = pool.MountInstance(d, nil)
	}
	if err != nil {
		return nil, err
	}

	return mountInfo, nil
}
// unmount unmounts the instance's rootfs volume if needed, reporting whether
// an unmount actually took place. Snapshots use the snapshot variant.
func (d *lxc) unmount() (bool, error) {
	pool, err := d.getStoragePool()
	if err != nil {
		return false, err
	}

	var unmounted bool
	if d.IsSnapshot() {
		unmounted, err = pool.UnmountInstanceSnapshot(d, nil)
	} else {
		unmounted, err = pool.UnmountInstance(d, nil)
	}
	if err != nil {
		return false, err
	}

	return unmounted, nil
}
// insertMountLXD inserts a mount into a LXD container.
// This function is used for the seccomp notifier and so cannot call any
// functions that would cause LXC to talk to the container's monitor. Otherwise
// we'll have a deadlock (with a timeout but still). The InitPID() call here is
// the exception since the seccomp notifier will make sure to always pass a
// valid PID.
func (d *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID int, shiftfs bool) error {
	pid := mntnsPID
	if pid <= 0 {
		// Get the init PID
		pid = d.InitPID()
		if pid == -1 {
			// Container isn't running
			return fmt.Errorf("Can't insert mount into stopped container")
		}
	}

	// Create the temporary mount target under the shared mounts path so it
	// is reachable from inside the container via /dev/.lxd-mounts.
	var tmpMount string
	var err error
	if shared.IsDir(source) {
		tmpMount, err = ioutil.TempDir(d.ShmountsPath(), "lxdmount_")
		if err != nil {
			return fmt.Errorf("Failed to create shmounts path: %s", err)
		}
	} else {
		// Non-directory sources need a file as the bind-mount target.
		f, err := ioutil.TempFile(d.ShmountsPath(), "lxdmount_")
		if err != nil {
			return fmt.Errorf("Failed to create shmounts path: %s", err)
		}

		tmpMount = f.Name()
		f.Close()
	}
	defer os.Remove(tmpMount)

	// Mount the filesystem onto the temporary target.
	err = unix.Mount(source, tmpMount, fstype, uintptr(flags), "")
	if err != nil {
		return fmt.Errorf("Failed to setup temporary mount: %s", err)
	}
	defer unix.Unmount(tmpMount, unix.MNT_DETACH)

	// Setup host side shiftfs as needed
	if shiftfs {
		err = unix.Mount(tmpMount, tmpMount, "shiftfs", 0, "mark,passthrough=3")
		if err != nil {
			return fmt.Errorf("Failed to setup host side shiftfs mount: %s", err)
		}
		defer unix.Unmount(tmpMount, unix.MNT_DETACH)
	}

	// Move the mount inside the container via the forkmount subcommand.
	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))
	pidStr := fmt.Sprintf("%d", pid)

	pidFdNr, pidFd := seccomp.MakePidFd(pid, d.state)
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	_, err = shared.RunCommandInheritFds(
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkmount",
		"lxd-mount",
		"--",
		pidStr,
		fmt.Sprintf("%d", pidFdNr),
		mntsrc,
		target,
		fmt.Sprintf("%v", shiftfs))
	if err != nil {
		return err
	}

	return nil
}
// insertMountLXC mounts source at target inside the container using the
// "forkmount lxc-mount" subcommand driven by the LXC config.
func (d *lxc) insertMountLXC(source, target, fstype string, flags int) error {
	cname := project.Instance(d.Project(), d.Name())
	configPath := filepath.Join(d.LogPath(), "lxc.conf")

	// LXC expects "none" rather than an empty filesystem type.
	if fstype == "" {
		fstype = "none"
	}

	// Target paths are interpreted relative to the container root.
	if !strings.HasPrefix(target, "/") {
		target = "/" + target
	}

	_, err := shared.RunCommand(
		d.state.OS.ExecPath,
		"forkmount",
		"lxc-mount",
		"--",
		cname,
		d.state.OS.LxcPath,
		configPath,
		source,
		target,
		fstype,
		fmt.Sprintf("%d", flags))

	return err
}
// insertMount mounts source at target inside the container, preferring the
// LXC file-based injection when available and shiftfs is not required.
func (d *lxc) insertMount(source, target, fstype string, flags int, shiftfs bool) error {
	if !shiftfs && d.state.OS.LXCFeatures["mount_injection_file"] {
		return d.insertMountLXC(source, target, fstype, flags)
	}

	return d.insertMountLXD(source, target, fstype, flags, -1, shiftfs)
}
// removeMount unmounts a path inside the running container, either via the
// LXC monitor's file-based injection or by entering the container's mount
// namespace with "forkmount lxd-umount".
func (d *lxc) removeMount(mount string) error {
	// Get the init PID.
	pid := d.InitPID()
	if pid == -1 {
		// Container isn't running.
		return fmt.Errorf("Can't remove mount from stopped container")
	}

	if d.state.OS.LXCFeatures["mount_injection_file"] {
		// Ask the LXC monitor to perform the unmount.
		configPath := filepath.Join(d.LogPath(), "lxc.conf")
		cname := project.Instance(d.Project(), d.Name())

		// Mount paths are interpreted relative to the container root.
		if !strings.HasPrefix(mount, "/") {
			mount = "/" + mount
		}

		_, err := shared.RunCommand(
			d.state.OS.ExecPath,
			"forkmount",
			"lxc-umount",
			"--",
			cname,
			d.state.OS.LxcPath,
			configPath,
			mount)
		if err != nil {
			return err
		}

		return nil
	}

	// Otherwise enter the container's namespaces and unmount directly.
	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	_, err := shared.RunCommandInheritFds(
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkmount",
		"lxd-umount",
		"--",
		fmt.Sprintf("%d", pid),
		fmt.Sprintf("%d", pidFdNr),
		mount)

	return err
}
// InsertSeccompUnixDevice inserts a seccomp device.
// It creates a unix device node on the host, owned by the requesting task's
// UID/GID shifted out of the container's idmap, then bind-mounts it into the
// container's mount namespace at the requested path.
func (d *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid int) error {
	if pid < 0 {
		return fmt.Errorf("Invalid request PID specified")
	}

	rootLink := fmt.Sprintf("/proc/%d/root", pid)
	rootPath, err := os.Readlink(rootLink)
	if err != nil {
		return err
	}

	uid, gid, _, _, err := seccomp.TaskIDs(pid)
	if err != nil {
		return err
	}

	idmapset, err := d.CurrentIdmap()
	if err != nil {
		return err
	}

	// Map the requesting task's IDs out of the container's namespace.
	nsuid, nsgid := idmapset.ShiftFromNs(uid, gid)
	m["uid"] = fmt.Sprintf("%d", nsuid)
	m["gid"] = fmt.Sprintf("%d", nsgid)

	// Resolve relative device paths against the requesting task's cwd.
	if !path.IsAbs(m["path"]) {
		cwdLink := fmt.Sprintf("/proc/%d/cwd", pid)
		prefixPath, err := os.Readlink(cwdLink)
		if err != nil {
			return err
		}

		prefixPath = strings.TrimPrefix(prefixPath, rootPath)
		m["path"] = filepath.Join(rootPath, prefixPath, m["path"])
	} else {
		m["path"] = filepath.Join(rootPath, m["path"])
	}

	// Reuse the idmap fetched above rather than querying it a second time.
	dev, err := device.UnixDeviceCreate(d.state, idmapset, d.DevicesPath(), prefix, m, true)
	if err != nil {
		return fmt.Errorf("Failed to setup device: %s", err)
	}
	devPath := dev.HostPath
	tgtPath := dev.RelativePath

	// Bind-mount it into the container.
	defer os.Remove(devPath)
	return d.insertMountLXD(devPath, tgtPath, "none", unix.MS_BIND, pid, false)
}
// removeUnixDevices deletes all unix device nodes from the instance's
// devices directory. Individual removal failures are logged, not returned.
func (d *lxc) removeUnixDevices() error {
	// Nothing to do if the devices directory was never created.
	if !shared.PathExists(d.DevicesPath()) {
		return nil
	}

	dents, err := ioutil.ReadDir(d.DevicesPath())
	if err != nil {
		return err
	}

	for _, entry := range dents {
		name := entry.Name()

		// Only handle unix device entries.
		isUnixDev := strings.HasPrefix(name, "forkmknod.unix.") ||
			strings.HasPrefix(name, "unix.") ||
			strings.HasPrefix(name, "infiniband.unix.")
		if !isUnixDev {
			continue
		}

		devicePath := filepath.Join(d.DevicesPath(), name)
		err := os.Remove(devicePath)
		if err != nil {
			d.logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
		}
	}

	return nil
}
// FillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
// generated name and hwaddr properties if these are missing from the device.
// Generated values are persisted as volatile config keys so they remain
// stable across restarts; both localConfig and expandedConfig are updated.
func (d *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
	var err error
	newDevice := m.Clone()

	// Function to try and guess an available name.
	nextInterfaceName := func() (string, error) {
		devNames := []string{}

		// Include all static interface names.
		for _, dev := range d.expandedDevices.Sorted() {
			if dev.Config["name"] != "" && !shared.StringInSlice(dev.Config["name"], devNames) {
				devNames = append(devNames, dev.Config["name"])
			}
		}

		// Include all currently allocated interface names
		// (volatile.<dev>.name keys).
		for k, v := range d.expandedConfig {
			if !strings.HasPrefix(k, shared.ConfigVolatilePrefix) {
				continue
			}

			fields := strings.SplitN(k, ".", 3)
			if len(fields) != 3 {
				continue
			}

			if fields[2] != "name" || shared.StringInSlice(v, devNames) {
				continue
			}

			devNames = append(devNames, v)
		}

		// Attempt to include all existing interfaces; best effort, ignored
		// when the liblxc handle or interface list can't be obtained.
		cname := project.Instance(d.Project(), d.Name())
		cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
		if err == nil {
			defer cc.Release()

			interfaces, err := cc.Interfaces()
			if err == nil {
				for _, name := range interfaces {
					if shared.StringInSlice(name, devNames) {
						continue
					}

					devNames = append(devNames, name)
				}
			}
		}

		// Pick the first ethN (or ibN for infiniband) not already taken.
		i := 0
		name := ""
		for {
			if m["type"] == "infiniband" {
				name = fmt.Sprintf("ib%d", i)
			} else {
				name = fmt.Sprintf("eth%d", i)
			}

			// Find a free device name
			if !shared.StringInSlice(name, devNames) {
				return name, nil
			}

			i++
		}
	}

	nicType, err := nictype.NICType(d.state, d.Project(), m)
	if err != nil {
		return nil, err
	}

	// Fill in the MAC address (not for NIC types that use the host's MAC).
	if !shared.StringInSlice(nicType, []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
		volatileHwaddr := d.localConfig[configKey]
		if volatileHwaddr == "" {
			// Generate a new MAC address.
			volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
			if err != nil || volatileHwaddr == "" {
				return nil, errors.Wrapf(err, "Failed generating %q", configKey)
			}

			// Update the database and update volatileHwaddr with stored value.
			volatileHwaddr, err = d.insertConfigkey(configKey, volatileHwaddr)
			if err != nil {
				return nil, errors.Wrapf(err, "Failed storing generated config key %q", configKey)
			}

			// Set stored value into current instance config.
			d.localConfig[configKey] = volatileHwaddr
			d.expandedConfig[configKey] = volatileHwaddr
		}

		if volatileHwaddr == "" {
			return nil, fmt.Errorf("Failed getting %q", configKey)
		}

		newDevice["hwaddr"] = volatileHwaddr
	}

	// Fill in the interface name.
	if m["name"] == "" {
		configKey := fmt.Sprintf("volatile.%s.name", name)
		volatileName := d.localConfig[configKey]
		if volatileName == "" {
			// Generate a new interface name.
			volatileName, err = nextInterfaceName()
			if err != nil || volatileName == "" {
				return nil, errors.Wrapf(err, "Failed generating %q", configKey)
			}

			// Update the database and update volatileName with stored value.
			volatileName, err = d.insertConfigkey(configKey, volatileName)
			if err != nil {
				return nil, errors.Wrapf(err, "Failed storing generated config key %q", configKey)
			}

			// Set stored value into current instance config.
			d.localConfig[configKey] = volatileName
			d.expandedConfig[configKey] = volatileName
		}

		if volatileName == "" {
			return nil, fmt.Errorf("Failed getting %q", configKey)
		}

		newDevice["name"] = volatileName
	}

	return newDevice, nil
}
// removeDiskDevices unmounts and deletes all disk device entries from the
// instance's devices directory. Removal failures are logged, not returned.
func (d *lxc) removeDiskDevices() error {
	// Nothing to do if the devices directory was never created.
	if !shared.PathExists(d.DevicesPath()) {
		return nil
	}

	dents, err := ioutil.ReadDir(d.DevicesPath())
	if err != nil {
		return err
	}

	for _, entry := range dents {
		// Only handle disk entries.
		if !strings.HasPrefix(entry.Name(), "disk.") {
			continue
		}

		diskPath := filepath.Join(d.DevicesPath(), entry.Name())

		// Always try to unmount the host side first; the error is ignored
		// as the path may simply not be mounted.
		_ = unix.Unmount(diskPath, unix.MNT_DETACH)

		// Remove the entry.
		err := os.Remove(diskPath)
		if err != nil {
			d.logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
		}
	}

	return nil
}
// setNetworkPriority applies limits.network.priority to every host network
// interface via the net_prio cgroup controller. It succeeds if at least one
// interface could be configured.
func (d *lxc) setNetworkPriority() error {
	// Load the go-lxc struct.
	err := d.initLXC(false)
	if err != nil {
		return err
	}

	// Load the cgroup struct.
	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// Check that the container is running.
	if !d.IsRunning() {
		return fmt.Errorf("Can't set network priority on stopped container")
	}

	// Don't bother if the cgroup controller doesn't exist.
	if !d.state.OS.CGInfo.Supports(cgroup.NetPrio, cg) {
		return nil
	}

	// Extract the configured priority, defaulting to 0.
	priority := d.expandedConfig["limits.network.priority"]
	if priority == "" {
		priority = "0"
	}

	priorityInt, err := strconv.Atoi(priority)
	if err != nil {
		return err
	}

	// Get all the interfaces.
	netifs, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Apply to every interface, remembering whether at least one succeeded.
	success := false
	var lastError error
	for _, netif := range netifs {
		err = cg.SetNetIfPrio(fmt.Sprintf("%s %d", netif.Name, priorityInt))
		if err != nil {
			lastError = err
		} else {
			success = true
		}
	}

	if !success {
		return fmt.Errorf("Failed to set network device priority: %s", lastError)
	}

	return nil
}
// IsFrozen reports whether the instance is in the FROZEN state.
func (d *lxc) IsFrozen() bool {
	state := d.State()
	return state == "FROZEN"
}
// IsNesting reports whether security.nesting is enabled for the instance.
func (d *lxc) IsNesting() bool {
	nesting := d.expandedConfig["security.nesting"]
	return shared.IsTrue(nesting)
}
// isCurrentlyPrivileged reports whether the instance is effectively
// privileged right now: a running container with no current idmap is
// privileged; otherwise the configured privilege level is used.
func (d *lxc) isCurrentlyPrivileged() bool {
	if d.IsRunning() {
		idmap, err := d.CurrentIdmap()
		if err == nil {
			return idmap == nil
		}
	}

	// Fall back to the configured privilege level.
	return d.IsPrivileged()
}
// IsPrivileged reports whether security.privileged is enabled in the
// instance's expanded config.
func (d *lxc) IsPrivileged() bool {
	privileged := d.expandedConfig["security.privileged"]
	return shared.IsTrue(privileged)
}
// IsRunning reports whether the instance is neither stopped nor broken.
func (d *lxc) IsRunning() bool {
	switch d.State() {
	case "BROKEN", "STOPPED":
		return false
	}

	return true
}
// InitPID returns the PID of the container's init process, or -1 when the
// liblxc handle cannot be initialised.
func (d *lxc) InitPID() int {
	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return -1
	}

	return d.c.InitPid()
}
// InitPidFd returns a pidfd referring to the container's init process.
func (d *lxc) InitPidFd() (*os.File, error) {
	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return nil, err
	}

	return d.c.InitPidFd()
}
// DevptsFd returns a directory fd for the container's devpts mount.
// Requires liblxc built with the devpts_fd API extension.
func (d *lxc) DevptsFd() (*os.File, error) {
	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return nil, err
	}

	if !liblxc.HasApiExtension("devpts_fd") {
		return nil, fmt.Errorf("Missing devpts_fd extension")
	}

	return d.c.DevptsFd()
}
// CurrentIdmap returns the idmap currently applied to the container,
// falling back to the on-disk idmap when none is recorded.
func (d *lxc) CurrentIdmap() (*idmap.IdmapSet, error) {
	jsonIdmap, ok := d.LocalConfig()["volatile.idmap.current"]
	if ok {
		return idmap.JSONUnmarshal(jsonIdmap)
	}

	return d.DiskIdmap()
}
// DiskIdmap returns the idmap last applied to the instance's on-disk
// rootfs, or nil when none has been recorded.
func (d *lxc) DiskIdmap() (*idmap.IdmapSet, error) {
	jsonIdmap, ok := d.LocalConfig()["volatile.last_state.idmap"]
	if ok {
		return idmap.JSONUnmarshal(jsonIdmap)
	}

	return nil, nil
}
// NextIdmap returns the idmap to apply on next start, falling back to the
// current idmap when none is recorded.
func (d *lxc) NextIdmap() (*idmap.IdmapSet, error) {
	jsonIdmap, ok := d.LocalConfig()["volatile.idmap.next"]
	if ok {
		return idmap.JSONUnmarshal(jsonIdmap)
	}

	return d.CurrentIdmap()
}
// State returns the instance state name, or the API error state when the
// state cannot be determined.
func (d *lxc) State() string {
	state, err := d.getLxcState()
	if err != nil {
		return api.Error.String()
	}

	return state.String()
}
// LogFilePath returns the path of the container's LXC log file.
func (d *lxc) LogFilePath() string {
	return filepath.Join(d.LogPath(), "lxc.log")
}
// StoragePool returns the name of the instance's storage pool from the
// cluster database.
func (d *lxc) StoragePool() (string, error) {
	poolName, err := d.state.Cluster.GetInstancePool(d.Project(), d.Name())
	if err != nil {
		return "", err
	}

	return poolName, nil
}
// Internal MAAS handling.
// maasInterfaces builds the MAAS interface description list from the given
// device map, including only NICs with a MAAS subnet configured.
func (d *lxc) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
	interfaces := []maas.ContainerInterface{}
	for devName, devConfig := range devices {
		if devConfig["type"] != "nic" {
			continue
		}

		// Skip NICs without any MAAS subnet configured.
		if devConfig["maas.subnet.ipv4"] == "" && devConfig["maas.subnet.ipv6"] == "" {
			continue
		}

		// Fill in auto-generated name/hwaddr if missing.
		filled, err := d.FillNetworkDevice(devName, devConfig)
		if err != nil {
			return nil, err
		}

		subnets := []maas.ContainerInterfaceSubnet{}

		// IPv4 subnet.
		if filled["maas.subnet.ipv4"] != "" {
			subnets = append(subnets, maas.ContainerInterfaceSubnet{
				Name:    filled["maas.subnet.ipv4"],
				Address: filled["ipv4.address"],
			})
		}

		// IPv6 subnet.
		if filled["maas.subnet.ipv6"] != "" {
			subnets = append(subnets, maas.ContainerInterfaceSubnet{
				Name:    filled["maas.subnet.ipv6"],
				Address: filled["ipv6.address"],
			})
		}

		interfaces = append(interfaces, maas.ContainerInterface{
			Name:       filled["name"],
			MACAddress: filled["hwaddr"],
			Subnets:    subnets,
		})
	}

	return interfaces, nil
}
// maasUpdate syncs the instance's MAAS container record with its current
// NIC devices, comparing against oldDevices when provided to decide whether
// a record should be deleted.
func (d *lxc) maasUpdate(oldDevices map[string]map[string]string) error {
	// Check if MAAS is configured.
	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
	if err != nil {
		return err
	}

	if maasURL == "" {
		return nil
	}

	// Check if there's something that uses MAAS.
	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
	if err != nil {
		return err
	}

	var oldInterfaces []maas.ContainerInterface
	if oldDevices != nil {
		oldInterfaces, err = d.maasInterfaces(oldDevices)
		if err != nil {
			return err
		}
	}

	if len(interfaces) == 0 && len(oldInterfaces) == 0 {
		return nil
	}

	// See if we're connected to MAAS.
	if d.state.MAAS == nil {
		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
	}

	exists, err := d.state.MAAS.DefinedContainer(d)
	if err != nil {
		return err
	}

	if !exists {
		return d.state.MAAS.CreateContainer(d, interfaces)
	}

	// Remove the record entirely when no MAAS interfaces remain.
	if len(interfaces) == 0 && len(oldInterfaces) > 0 {
		return d.state.MAAS.DeleteContainer(d)
	}

	return d.state.MAAS.UpdateContainer(d, interfaces)
}
// maasRename renames the instance's MAAS container record, creating it
// first (via maasUpdate) when it doesn't exist yet.
func (d *lxc) maasRename(newName string) error {
	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
	if err != nil {
		return err
	}

	// MAAS integration disabled.
	if maasURL == "" {
		return nil
	}

	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
	if err != nil {
		return err
	}

	// Nothing managed by MAAS.
	if len(interfaces) == 0 {
		return nil
	}

	if d.state.MAAS == nil {
		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
	}

	exists, err := d.state.MAAS.DefinedContainer(d)
	if err != nil {
		return err
	}

	if !exists {
		// No record to rename; create one instead.
		return d.maasUpdate(nil)
	}

	return d.state.MAAS.RenameContainer(d, newName)
}
// maasDelete removes the instance's MAAS container record, if MAAS is
// configured and a record exists.
func (d *lxc) maasDelete() error {
	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
	if err != nil {
		return err
	}

	// MAAS integration disabled.
	if maasURL == "" {
		return nil
	}

	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
	if err != nil {
		return err
	}

	// Nothing managed by MAAS.
	if len(interfaces) == 0 {
		return nil
	}

	if d.state.MAAS == nil {
		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
	}

	exists, err := d.state.MAAS.DefinedContainer(d)
	if err != nil {
		return err
	}

	// Nothing to delete.
	if !exists {
		return nil
	}

	return d.state.MAAS.DeleteContainer(d)
}
// CGroup returns a cgroup handle backed by the live container.
func (d *lxc) CGroup() (*cgroup.CGroup, error) {
	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return nil, err
	}

	return d.cgroup(nil)
}
// cgroup builds a cgroup handle. With cc == nil it operates on the live
// container's cgroup; otherwise it reads/writes the supplied container's
// LXC config keys instead.
func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) {
	rw := lxcCgroupReadWriter{}
	if cc == nil {
		rw.cc = d.c
	} else {
		rw.cc = cc
		rw.conf = true
	}

	cg, err := cgroup.New(&rw)
	if err != nil {
		return nil, err
	}

	cg.UnifiedCapable = liblxc.HasApiExtension("cgroup2")
	return cg, nil
}
// lxcCgroupReadWriter adapts a liblxc container to the cgroup read/write
// interface, operating either on the live cgroup tree or, when conf is
// set, on the container's lxc.cgroup* config keys.
type lxcCgroupReadWriter struct {
	cc *liblxc.Container
	// conf selects config-key mode instead of live cgroup values.
	conf bool
}
// Get reads a cgroup key, either from the container's stored config
// (lxc.cgroup[2].*) or from the live cgroup tree, joining multi-value
// results with newlines.
func (rw *lxcCgroupReadWriter) Get(version cgroup.Backend, controller string, key string) (string, error) {
	if !rw.conf {
		return strings.Join(rw.cc.CgroupItem(key), "\n"), nil
	}

	lxcKey := fmt.Sprintf("lxc.cgroup.%s", key)
	if version == cgroup.V2 {
		lxcKey = fmt.Sprintf("lxc.cgroup2.%s", key)
	}

	return strings.Join(rw.cc.ConfigItem(lxcKey), "\n"), nil
}
// Set writes a cgroup key, either into the container's stored config
// (lxc.cgroup[2].*) or directly into the live cgroup tree.
func (rw *lxcCgroupReadWriter) Set(version cgroup.Backend, controller string, key string, value string) error {
	if !rw.conf {
		return rw.cc.SetCgroupItem(key, value)
	}

	prefix := "lxc.cgroup"
	if version != cgroup.V1 {
		prefix = "lxc.cgroup2"
	}

	return lxcSetConfigItem(rw.cc, fmt.Sprintf("%s.%s", prefix, key), value)
}
// UpdateBackupFile writes the instance's backup.yaml file to storage.
func (d *lxc) UpdateBackupFile() error {
	pool, err := d.getStoragePool()
	if err != nil {
		return err
	}

	// Delegate to the storage pool which owns the backup file format.
	return pool.UpdateInstanceBackupFile(d, nil)
}
// SaveConfigFile generates the LXC config file on disk.
func (d *lxc) SaveConfigFile() error {
	if err := d.initLXC(true); err != nil {
		return errors.Wrapf(err, "Failed to generate LXC config")
	}

	// Generate the LXC config.
	configPath := filepath.Join(d.LogPath(), "lxc.conf")
	if err := d.c.SaveConfigFile(configPath); err != nil {
		// Don't leave a partial config file behind.
		os.Remove(configPath)
		return errors.Wrapf(err, "Failed to save LXC config to file %q", configPath)
	}

	return nil
}
// Info returns "lxc" and the currently loaded version of LXC
func (d *lxc) Info() instance.Info {
	info := instance.Info{
		Name:    "lxc",
		Version: liblxc.Version(),
	}

	return info
}
lxd/instance/drivers/driver/lxc: Removes deviceResetVolatile provided by common
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package drivers
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/flosch/pongo2"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
liblxc "gopkg.in/lxc/go-lxc.v2"
yaml "gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/cgroup"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/device"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/device/nictype"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/instance/operationlock"
"github.com/lxc/lxd/lxd/maas"
"github.com/lxc/lxd/lxd/network"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/seccomp"
"github.com/lxc/lxd/lxd/state"
storagePools "github.com/lxc/lxd/lxd/storage"
storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/template"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/instancewriter"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/logging"
"github.com/lxc/lxd/shared/netutils"
"github.com/lxc/lxd/shared/osarch"
"github.com/lxc/lxd/shared/units"
)
// Helper functions
// lxcSetConfigItem sets an LXC config key on the given go-lxc handle,
// translating modern (liblxc >= 2.1) key names to their legacy equivalents
// when running against an older liblxc.
func lxcSetConfigItem(c *liblxc.Container, key string, value string) error {
	if c == nil {
		return fmt.Errorf("Uninitialized go-lxc struct")
	}

	if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
		// Map the new-style key names onto their pre-2.1 spellings.
		switch key {
		case "lxc.uts.name":
			key = "lxc.utsname"
		case "lxc.pty.max":
			key = "lxc.pts"
		case "lxc.tty.dir":
			key = "lxc.devttydir"
		case "lxc.tty.max":
			key = "lxc.tty"
		case "lxc.apparmor.profile":
			key = "lxc.aa_profile"
		case "lxc.apparmor.allow_incomplete":
			key = "lxc.aa_allow_incomplete"
		case "lxc.selinux.context":
			key = "lxc.se_context"
		case "lxc.mount.fstab":
			key = "lxc.mount"
		case "lxc.console.path":
			key = "lxc.console"
		case "lxc.seccomp.profile":
			key = "lxc.seccomp"
		case "lxc.signal.halt":
			key = "lxc.haltsignal"
		case "lxc.signal.reboot":
			key = "lxc.rebootsignal"
		case "lxc.signal.stop":
			key = "lxc.stopsignal"
		case "lxc.log.syslog":
			key = "lxc.syslog"
		case "lxc.log.level":
			key = "lxc.loglevel"
		case "lxc.log.file":
			key = "lxc.logfile"
		case "lxc.init.cmd":
			key = "lxc.init_cmd"
		case "lxc.init.uid":
			key = "lxc.init_uid"
		case "lxc.init.gid":
			key = "lxc.init_gid"
		case "lxc.idmap":
			key = "lxc.id_map"
		}
	}

	// Process limits have no pre-2.1 equivalent key.
	if strings.HasPrefix(key, "lxc.prlimit.") && !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
		return fmt.Errorf(`Process limits require liblxc >= 2.1`)
	}

	err := c.SetConfigItem(key, value)
	if err != nil {
		// Include the underlying liblxc error so callers can see why it failed
		// (the previous message discarded it entirely).
		return fmt.Errorf("Failed to set LXC config: %s=%s: %w", key, value, err)
	}

	return nil
}
// lxcStatusCode translates a liblxc container state into a LXD API status
// code. Unknown states map to the zero StatusCode, matching the previous
// map-miss behaviour (the old implementation allocated a 9-entry map on
// every call just to do one lookup).
func lxcStatusCode(state liblxc.State) api.StatusCode {
	switch int(state) {
	case 1:
		return api.Stopped
	case 2:
		return api.Starting
	case 3:
		return api.Running
	case 4:
		return api.Stopping
	case 5:
		return api.Aborting
	case 6:
		return api.Freezing
	case 7:
		return api.Frozen
	case 8:
		return api.Thawed
	case 9:
		return api.Error
	}

	// Zero value for unknown states, same as the former map lookup miss.
	return api.StatusCode(0)
}
// Loader functions
// lxcCreate creates a new container instance (or snapshot): it validates the
// expanded config/devices, creates the storage volume DB records, allocates
// an initial idmap, adds the instance's devices and registers it with MAAS.
// On any failure all changes are reverted via d.Delete().
func lxcCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
	// Create the container struct
	d := &lxc{
		common: common{
			state: s,

			architecture: args.Architecture,
			creationDate: args.CreationDate,
			dbType:       args.Type,
			description:  args.Description,
			ephemeral:    args.Ephemeral,
			expiryDate:   args.ExpiryDate,
			id:           args.ID,
			lastUsedDate: args.LastUsedDate,
			localConfig:  args.Config,
			localDevices: args.Devices,
			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type, "instance": args.Name, "project": args.Project}),
			name:         args.Name,
			node:         args.Node,
			profiles:     args.Profiles,
			project:      args.Project,
			snapshot:     args.Snapshot,
			stateful:     args.Stateful,
		},
	}

	revert := revert.New()
	defer revert.Fail()

	// Use d.Delete() in revert on error as this function doesn't just create DB records, it can also cause
	// other modifications to the host when devices are added.
	revert.Add(func() { d.Delete(true) })

	// Cleanup the zero values
	if d.expiryDate.IsZero() {
		d.expiryDate = time.Time{}
	}

	if d.creationDate.IsZero() {
		d.creationDate = time.Time{}
	}

	if d.lastUsedDate.IsZero() {
		d.lastUsedDate = time.Time{}
	}

	d.logger.Info("Creating container", log.Ctx{"ephemeral": d.ephemeral})

	// Load the config.
	err := d.init()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to expand config")
	}

	// Validate expanded config.
	err = instance.ValidConfig(s.OS, d.expandedConfig, false, true)
	if err != nil {
		return nil, errors.Wrap(err, "Invalid config")
	}

	err = instance.ValidDevices(s, s.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
	if err != nil {
		return nil, errors.Wrap(err, "Invalid devices")
	}

	// Retrieve the container's storage pool.
	// For snapshots the pool is resolved via the parent instance.
	var storageInstance instance.Instance
	if d.IsSnapshot() {
		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)

		// Load the parent.
		storageInstance, err = instance.LoadByProjectAndName(d.state, d.project, parentName)
		if err != nil {
			return nil, errors.Wrap(err, "Invalid parent")
		}
	} else {
		storageInstance = d
	}

	_, rootDiskDevice, err := shared.GetRootDiskDevice(storageInstance.ExpandedDevices().CloneNative())
	if err != nil {
		return nil, err
	}

	if rootDiskDevice["pool"] == "" {
		return nil, fmt.Errorf("The container's root device is missing the pool property")
	}

	// Initialize the storage pool.
	d.storagePool, err = storagePools.GetPoolByName(d.state, rootDiskDevice["pool"])
	if err != nil {
		return nil, errors.Wrapf(err, "Failed loading storage pool")
	}

	// Create a new storage volume database entry for the container's storage volume.
	if d.IsSnapshot() {
		// Copy volume config from parent.
		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(args.Name)
		_, parentVol, err := s.Cluster.GetLocalStoragePoolVolume(args.Project, parentName, db.StoragePoolVolumeTypeContainer, d.storagePool.ID())
		if err != nil {
			return nil, errors.Wrapf(err, "Failed loading source volume for snapshot")
		}

		_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, d.storagePool.ID(), parentVol.Config, time.Time{})
		if err != nil {
			return nil, errors.Wrapf(err, "Failed creating storage record for snapshot")
		}
	} else {
		// Fill default config for new instances.
		volumeConfig := map[string]string{}
		err = d.storagePool.FillInstanceConfig(d, volumeConfig)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed filling default config")
		}

		_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeContainer, d.storagePool.ID(), volumeConfig, db.StoragePoolVolumeContentTypeFS)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed creating storage record")
		}
	}

	// Setup initial idmap config
	// Privileged containers run with the host's ids and need no map.
	var idmap *idmap.IdmapSet
	base := int64(0)
	if !d.IsPrivileged() {
		idmap, base, err = findIdmap(
			s,
			args.Name,
			d.expandedConfig["security.idmap.isolated"],
			d.expandedConfig["security.idmap.base"],
			d.expandedConfig["security.idmap.size"],
			d.expandedConfig["raw.idmap"],
		)

		if err != nil {
			return nil, err
		}
	}

	// Persist the allocated idmap as JSON in the volatile config.
	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			return nil, err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}

	err = d.VolatileSet(map[string]string{"volatile.idmap.next": jsonIdmap})
	if err != nil {
		return nil, err
	}

	err = d.VolatileSet(map[string]string{"volatile.idmap.base": fmt.Sprintf("%v", base)})
	if err != nil {
		return nil, err
	}

	// Invalid idmap cache.
	d.idmapset = nil

	// Set last_state if not currently set.
	if d.localConfig["volatile.last_state.idmap"] == "" {
		err = d.VolatileSet(map[string]string{"volatile.last_state.idmap": "[]"})
		if err != nil {
			return nil, err
		}
	}

	// Re-run init to update the idmap.
	err = d.init()
	if err != nil {
		return nil, err
	}

	if !d.IsSnapshot() {
		// Add devices to container.
		for k, m := range d.expandedDevices {
			// Unsupported device types are tolerated here; they fail later at start time.
			err = d.deviceAdd(k, m, false)
			if err != nil && err != device.ErrUnsupportedDevType {
				return nil, errors.Wrapf(err, "Failed to add device %q", k)
			}
		}

		// Update MAAS (must run after the MAC addresses have been generated).
		err = d.maasUpdate(nil)
		if err != nil {
			return nil, err
		}
	}

	d.logger.Info("Created container", log.Ctx{"ephemeral": d.ephemeral})
	d.lifecycle("created", nil)

	revert.Success()
	return d, nil
}
// lxcLoad instantiates an existing container from its DB record and expands
// its config and devices from the supplied profiles.
func lxcLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
	// Build the container struct without initialising it.
	d := lxcInstantiate(s, args, nil)

	// Release the liblxc handle when the struct is garbage collected.
	runtime.SetFinalizer(d, lxcUnload)

	// Expand configuration and devices from the profiles.
	inst := d.(*lxc)
	if err := inst.expandConfig(profiles); err != nil {
		return nil, err
	}

	if err := inst.expandDevices(profiles); err != nil {
		return nil, err
	}

	return d, nil
}
// lxcUnload is called by the garbage collector to release the cached liblxc
// handle.
func lxcUnload(d *lxc) {
	runtime.SetFinalizer(d, nil)

	if d.c == nil {
		return
	}

	d.c.Release()
	d.c = nil
}
// lxcInstantiate creates a container struct without initializing it.
func lxcInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) instance.Instance {
	d := &lxc{
		common: common{
			state: s,

			architecture: args.Architecture,
			creationDate: args.CreationDate,
			dbType:       args.Type,
			description:  args.Description,
			ephemeral:    args.Ephemeral,
			expiryDate:   args.ExpiryDate,
			id:           args.ID,
			lastUsedDate: args.LastUsedDate,
			localConfig:  args.Config,
			localDevices: args.Devices,
			logger:       logging.AddContext(logger.Log, log.Ctx{"instanceType": args.Type, "instance": args.Name, "project": args.Project}),
			name:         args.Name,
			node:         args.Node,
			profiles:     args.Profiles,
			project:      args.Project,
			snapshot:     args.Snapshot,
			stateful:     args.Stateful,
		},
	}

	// Normalise zero dates to the canonical zero time value.
	for _, t := range []*time.Time{&d.expiryDate, &d.creationDate, &d.lastUsedDate} {
		if t.IsZero() {
			*t = time.Time{}
		}
	}

	// This is passed during expanded config validation.
	if expandedDevices != nil {
		d.expandedDevices = expandedDevices
	}

	return d
}
// The LXC container driver.
type lxc struct {
	common

	// Config handling.
	fromHook bool // True when invoked from an LXC hook; go-lxc cannot be used then (see initLXC).

	// Cached handles.
	// Do not use these variables directly, instead use their associated get functions so they
	// will be initialised on demand.
	c *liblxc.Container // Cached go-lxc handle, populated by initLXC().
	cConfig bool // Whether c was initialised with the full config applied.
	idmapset *idmap.IdmapSet // Cached idmap set.
	storagePool storagePools.Pool // Cached storage pool, see getStoragePool().
}
// idmapSize determines the uid/gid map size to use: an explicit numeric
// size when given, 65536 for isolated containers, otherwise the host's own
// map range.
func idmapSize(state *state.State, isolatedStr string, size string) (int64, error) {
	// An explicit size wins over the automatic defaults.
	if size != "" && size != "auto" {
		return strconv.ParseInt(size, 10, 64)
	}

	if shared.IsTrue(isolatedStr) {
		return 65536, nil
	}

	// Non-isolated: inherit the host's range, which must be a uid+gid pair.
	if len(state.OS.IdmapSet.Idmap) != 2 {
		return 0, fmt.Errorf("bad initial idmap: %v", state.OS.IdmapSet)
	}

	return state.OS.IdmapSet.Idmap[0].Maprange, nil
}
// idmapLock serialises the scan-and-allocate phase of findIdmap so that
// concurrent container creations don't hand out overlapping isolated ranges.
var idmapLock sync.Mutex
// findIdmap returns the uid/gid map and base host id for container cName.
// Non-isolated containers share the host's map (plus raw.idmap overrides,
// returned with base 0). Isolated containers get a dedicated range: either
// the one fixed by configBase, or the first free gap of at least configSize
// ids once every other isolated container's range is accounted for.
func findIdmap(state *state.State, cName string, isolatedStr string, configBase string, configSize string, rawIdmap string) (*idmap.IdmapSet, int64, error) {
	isolated := false
	if shared.IsTrue(isolatedStr) {
		isolated = true
	}

	rawMaps, err := instance.ParseRawIdmap(rawIdmap)
	if err != nil {
		return nil, 0, err
	}

	// Non-isolated: copy the host map and layer the raw entries on top.
	if !isolated {
		newIdmapset := idmap.IdmapSet{Idmap: make([]idmap.IdmapEntry, len(state.OS.IdmapSet.Idmap))}
		copy(newIdmapset.Idmap, state.OS.IdmapSet.Idmap)

		for _, ent := range rawMaps {
			// Only ErrHostIdIsSubId is fatal; other AddSafe errors are tolerated.
			err := newIdmapset.AddSafe(ent)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, 0, err
			}
		}

		return &newIdmapset, 0, nil
	}

	size, err := idmapSize(state, isolatedStr, configSize)
	if err != nil {
		return nil, 0, err
	}

	// mkIdmap builds an isolated uid+gid map at the given host offset with the
	// raw entries layered on top.
	mkIdmap := func(offset int64, size int64) (*idmap.IdmapSet, error) {
		set := &idmap.IdmapSet{Idmap: []idmap.IdmapEntry{
			{Isuid: true, Nsid: 0, Hostid: offset, Maprange: size},
			{Isgid: true, Nsid: 0, Hostid: offset, Maprange: size},
		}}

		for _, ent := range rawMaps {
			err := set.AddSafe(ent)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, err
			}
		}

		return set, nil
	}

	// An explicitly configured base skips the allocation scan entirely.
	if configBase != "" {
		offset, err := strconv.ParseInt(configBase, 10, 64)
		if err != nil {
			return nil, 0, err
		}

		set, err := mkIdmap(offset, size)
		if err != nil && err == idmap.ErrHostIdIsSubId {
			return nil, 0, err
		}

		return set, offset, nil
	}

	// Hold the lock while scanning so parallel creations can't pick
	// overlapping ranges.
	idmapLock.Lock()
	defer idmapLock.Unlock()

	cts, err := instance.LoadNodeAll(state, instancetype.Container)
	if err != nil {
		return nil, 0, err
	}

	// Start allocating just past the host's own range.
	offset := state.OS.IdmapSet.Idmap[0].Hostid + 65536

	// Collect the ranges already used by other isolated containers.
	mapentries := idmap.ByHostid{}
	for _, container := range cts {
		if container.Type() != instancetype.Container {
			continue
		}

		name := container.Name()

		/* Don't change our map Just Because. */
		if name == cName {
			continue
		}

		// Privileged and non-isolated containers don't own a range.
		if container.IsPrivileged() {
			continue
		}

		if !shared.IsTrue(container.ExpandedConfig()["security.idmap.isolated"]) {
			continue
		}

		cBase := int64(0)
		if container.ExpandedConfig()["volatile.idmap.base"] != "" {
			cBase, err = strconv.ParseInt(container.ExpandedConfig()["volatile.idmap.base"], 10, 64)
			if err != nil {
				return nil, 0, err
			}
		}

		cSize, err := idmapSize(state, container.ExpandedConfig()["security.idmap.isolated"], container.ExpandedConfig()["security.idmap.size"])
		if err != nil {
			return nil, 0, err
		}

		mapentries = append(mapentries, &idmap.IdmapEntry{Hostid: int64(cBase), Maprange: cSize})
	}

	// Walk the used ranges in host-id order looking for a gap of at least `size`.
	sort.Sort(mapentries)

	for i := range mapentries {
		if i == 0 {
			if mapentries[0].Hostid < offset+size {
				offset = mapentries[0].Hostid + mapentries[0].Maprange
				continue
			}

			set, err := mkIdmap(offset, size)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, 0, err
			}

			return set, offset, nil
		}

		if mapentries[i-1].Hostid+mapentries[i-1].Maprange > offset {
			offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
			continue
		}

		offset = mapentries[i-1].Hostid + mapentries[i-1].Maprange
		if offset+size < mapentries[i].Hostid {
			set, err := mkIdmap(offset, size)
			if err != nil && err == idmap.ErrHostIdIsSubId {
				return nil, 0, err
			}

			return set, offset, nil
		}

		offset = mapentries[i].Hostid + mapentries[i].Maprange
	}

	// No gap between existing ranges; try the space after the last one,
	// bounded by the end of the host's subid allocation.
	if offset+size < state.OS.IdmapSet.Idmap[0].Hostid+state.OS.IdmapSet.Idmap[0].Maprange {
		set, err := mkIdmap(offset, size)
		if err != nil && err == idmap.ErrHostIdIsSubId {
			return nil, 0, err
		}

		return set, offset, nil
	}

	return nil, 0, fmt.Errorf("Not enough uid/gid available for the container")
}
// init computes the expanded config and device list from the instance's
// profiles and local settings.
func (d *lxc) init() error {
	if err := d.expandConfig(nil); err != nil {
		return err
	}

	return d.expandDevices(nil)
}
// initLXC initialises the go-lxc struct for the instance and, when config is
// true, populates it with the full LXC configuration (logging, mounts,
// cgroup limits, AppArmor/Seccomp, idmap, environment and raw.lxc). The
// resulting handle is cached in d.c.
//
// Fixes the inverted NVIDIA_REQUIRE_CUDA / NVIDIA_REQUIRE_DRIVER conditions:
// the constraints are now exported when configured rather than when empty.
func (d *lxc) initLXC(config bool) error {
	// No need to go through all that for snapshots
	if d.IsSnapshot() {
		return nil
	}

	// Check if being called from a hook
	if d.fromHook {
		return fmt.Errorf("You can't use go-lxc from inside a LXC hook")
	}

	// Check if already initialized
	if d.c != nil {
		if !config || d.cConfig {
			return nil
		}
	}

	// Load the go-lxc struct
	cname := project.Instance(d.Project(), d.Name())
	cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
	if err != nil {
		return err
	}

	// Load cgroup abstraction
	cg, err := d.cgroup(cc)
	if err != nil {
		return err
	}

	// Release the liblxc handle on any error path below.
	freeContainer := true
	defer func() {
		if freeContainer {
			cc.Release()
		}
	}()

	// Setup logging
	logfile := d.LogFilePath()
	err = lxcSetConfigItem(cc, "lxc.log.file", logfile)
	if err != nil {
		return err
	}

	logLevel := "warn"
	if daemon.Debug {
		logLevel = "trace"
	} else if daemon.Verbose {
		logLevel = "info"
	}

	err = lxcSetConfigItem(cc, "lxc.log.level", logLevel)
	if err != nil {
		return err
	}

	if util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
		// Default size log buffer
		err = lxcSetConfigItem(cc, "lxc.console.buffer.size", "auto")
		if err != nil {
			return err
		}

		err = lxcSetConfigItem(cc, "lxc.console.size", "auto")
		if err != nil {
			return err
		}

		// File to dump ringbuffer contents to when requested or
		// container shutdown.
		consoleBufferLogFile := d.ConsoleBufferLogPath()
		err = lxcSetConfigItem(cc, "lxc.console.logfile", consoleBufferLogFile)
		if err != nil {
			return err
		}
	}

	// Allow for lightweight init
	d.cConfig = config
	if !config {
		if d.c != nil {
			d.c.Release()
		}

		d.c = cc
		freeContainer = false
		return nil
	}

	if d.IsPrivileged() {
		// Base config
		toDrop := "sys_time sys_module sys_rawio"
		if !d.state.OS.AppArmorStacking || d.state.OS.AppArmorStacked {
			toDrop = toDrop + " mac_admin mac_override"
		}

		err = lxcSetConfigItem(cc, "lxc.cap.drop", toDrop)
		if err != nil {
			return err
		}
	}

	// Set an appropriate /proc, /sys/ and /sys/fs/cgroup
	mounts := []string{}
	if d.IsPrivileged() && !d.state.OS.RunningInUserNS {
		mounts = append(mounts, "proc:mixed")
		mounts = append(mounts, "sys:mixed")
	} else {
		mounts = append(mounts, "proc:rw")
		mounts = append(mounts, "sys:rw")
	}

	cgInfo := cgroup.GetInfo()
	if cgInfo.Namespacing {
		if cgInfo.Layout == cgroup.CgroupsUnified {
			mounts = append(mounts, "cgroup:rw:force")
		} else {
			mounts = append(mounts, "cgroup:mixed")
		}
	} else {
		mounts = append(mounts, "cgroup:mixed")
	}

	err = lxcSetConfigItem(cc, "lxc.mount.auto", strings.Join(mounts, " "))
	if err != nil {
		return err
	}

	err = lxcSetConfigItem(cc, "lxc.autodev", "1")
	if err != nil {
		return err
	}

	err = lxcSetConfigItem(cc, "lxc.pty.max", "1024")
	if err != nil {
		return err
	}

	bindMounts := []string{
		"/dev/fuse",
		"/dev/net/tun",
		"/proc/sys/fs/binfmt_misc",
		"/sys/firmware/efi/efivars",
		"/sys/fs/fuse/connections",
		"/sys/fs/pstore",
		"/sys/kernel/config",
		"/sys/kernel/debug",
		"/sys/kernel/security",
		"/sys/kernel/tracing",
	}

	if d.IsPrivileged() && !d.state.OS.RunningInUserNS {
		err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0")
		if err != nil {
			return err
		}
	} else {
		bindMounts = append(bindMounts, "/dev/mqueue")
	}

	for _, mnt := range bindMounts {
		if !shared.PathExists(mnt) {
			continue
		}

		if shared.IsDir(mnt) {
			err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none rbind,create=dir,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
			if err != nil {
				return err
			}
		} else {
			err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file,optional 0 0", mnt, strings.TrimPrefix(mnt, "/")))
			if err != nil {
				return err
			}
		}
	}

	// For lxcfs
	templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
	if templateConfDir == "" {
		templateConfDir = "/usr/share/lxc/config"
	}

	if shared.PathExists(fmt.Sprintf("%s/common.conf.d/", templateConfDir)) {
		err = lxcSetConfigItem(cc, "lxc.include", fmt.Sprintf("%s/common.conf.d/", templateConfDir))
		if err != nil {
			return err
		}
	}

	// Configure devices cgroup
	if d.IsPrivileged() && !d.state.OS.RunningInUserNS && d.state.OS.CGInfo.Supports(cgroup.Devices, cg) {
		if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified {
			err = lxcSetConfigItem(cc, "lxc.cgroup2.devices.deny", "a")
		} else {
			err = lxcSetConfigItem(cc, "lxc.cgroup.devices.deny", "a")
		}
		if err != nil {
			return err
		}

		devices := []string{
			"b *:* m",      // Allow mknod of block devices
			"c *:* m",      // Allow mknod of char devices
			"c 136:* rwm",  // /dev/pts devices
			"c 1:3 rwm",    // /dev/null
			"c 1:5 rwm",    // /dev/zero
			"c 1:7 rwm",    // /dev/full
			"c 1:8 rwm",    // /dev/random
			"c 1:9 rwm",    // /dev/urandom
			"c 5:0 rwm",    // /dev/tty
			"c 5:1 rwm",    // /dev/console
			"c 5:2 rwm",    // /dev/ptmx
			"c 10:229 rwm", // /dev/fuse
			"c 10:200 rwm", // /dev/net/tun
		}

		for _, dev := range devices {
			if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified {
				err = lxcSetConfigItem(cc, "lxc.cgroup2.devices.allow", dev)
			} else {
				err = lxcSetConfigItem(cc, "lxc.cgroup.devices.allow", dev)
			}

			if err != nil {
				return err
			}
		}
	}

	if d.IsNesting() {
		/*
		 * mount extra /proc and /sys to work around kernel
		 * restrictions on remounting them when covered
		 */
		err = lxcSetConfigItem(cc, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional 0 0")
		if err != nil {
			return err
		}

		err = lxcSetConfigItem(cc, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional 0 0")
		if err != nil {
			return err
		}
	}

	// Setup architecture
	personality, err := osarch.ArchitecturePersonality(d.architecture)
	if err != nil {
		// Fall back to the host's native personality.
		personality, err = osarch.ArchitecturePersonality(d.state.OS.Architectures[0])
		if err != nil {
			return err
		}
	}

	err = lxcSetConfigItem(cc, "lxc.arch", personality)
	if err != nil {
		return err
	}

	// Setup the hooks
	err = lxcSetConfigItem(cc, "lxc.hook.version", "1")
	if err != nil {
		return err
	}

	// Call the onstart hook on start.
	err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("/proc/%d/exe callhook %s %s %s start", os.Getpid(), shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
	if err != nil {
		return err
	}

	// Call the onstopns hook on stop but before namespaces are unmounted.
	err = lxcSetConfigItem(cc, "lxc.hook.stop", fmt.Sprintf("%s callhook %s %s %s stopns", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
	if err != nil {
		return err
	}

	// Call the onstop hook on stop.
	err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %s %s stop", d.state.OS.ExecPath, shared.VarPath(""), strconv.Quote(d.Project()), strconv.Quote(d.Name())))
	if err != nil {
		return err
	}

	// Setup the console
	err = lxcSetConfigItem(cc, "lxc.tty.max", "0")
	if err != nil {
		return err
	}

	// Setup the hostname
	err = lxcSetConfigItem(cc, "lxc.uts.name", d.Name())
	if err != nil {
		return err
	}

	// Setup devlxd
	if d.expandedConfig["security.devlxd"] == "" || shared.IsTrue(d.expandedConfig["security.devlxd"]) {
		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd")))
		if err != nil {
			return err
		}
	}

	// Setup AppArmor
	if d.state.OS.AppArmorAvailable {
		if d.state.OS.AppArmorConfined || !d.state.OS.AppArmorAdmin {
			// If confined but otherwise able to use AppArmor, use our own profile
			curProfile := util.AppArmorProfile()
			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", curProfile)
			if err != nil {
				return err
			}
		} else {
			// If not currently confined, use the container's profile
			profile := apparmor.InstanceProfileName(d)

			/* In the nesting case, we want to enable the inside
			 * LXD to load its profile. Unprivileged containers can
			 * load profiles, but privileged containers cannot, so
			 * let's not use a namespace so they can fall back to
			 * the old way of nesting, i.e. using the parent's
			 * profile.
			 */
			if d.state.OS.AppArmorStacking && !d.state.OS.AppArmorStacked {
				profile = fmt.Sprintf("%s//&:%s:", profile, apparmor.InstanceNamespaceName(d))
			}

			err := lxcSetConfigItem(cc, "lxc.apparmor.profile", profile)
			if err != nil {
				return err
			}
		}
	}

	// Setup Seccomp if necessary
	if seccomp.InstanceNeedsPolicy(d) {
		err = lxcSetConfigItem(cc, "lxc.seccomp.profile", seccomp.ProfilePath(d))
		if err != nil {
			return err
		}

		// Setup notification socket
		// System requirement errors are handled during policy generation instead of here
		ok, err := seccomp.InstanceNeedsIntercept(d.state, d)
		if err == nil && ok {
			err = lxcSetConfigItem(cc, "lxc.seccomp.notify.proxy", fmt.Sprintf("unix:%s", shared.VarPath("seccomp.socket")))
			if err != nil {
				return err
			}
		}
	}

	// Setup idmap
	idmapset, err := d.NextIdmap()
	if err != nil {
		return err
	}

	if idmapset != nil {
		lines := idmapset.ToLxcString()
		for _, line := range lines {
			err := lxcSetConfigItem(cc, "lxc.idmap", line)
			if err != nil {
				return err
			}
		}
	}

	// Setup environment
	for k, v := range d.expandedConfig {
		if strings.HasPrefix(k, "environment.") {
			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
			if err != nil {
				return err
			}
		}
	}

	// Setup NVIDIA runtime
	if shared.IsTrue(d.expandedConfig["nvidia.runtime"]) {
		hookDir := os.Getenv("LXD_LXC_HOOK")
		if hookDir == "" {
			hookDir = "/usr/share/lxc/hooks"
		}

		hookPath := filepath.Join(hookDir, "nvidia")
		if !shared.PathExists(hookPath) {
			return fmt.Errorf("The NVIDIA LXC hook couldn't be found")
		}

		_, err := exec.LookPath("nvidia-container-cli")
		if err != nil {
			return fmt.Errorf("The NVIDIA container tools couldn't be found")
		}

		err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_VISIBLE_DEVICES=none")
		if err != nil {
			return err
		}

		nvidiaDriver := d.expandedConfig["nvidia.driver.capabilities"]
		if nvidiaDriver == "" {
			err = lxcSetConfigItem(cc, "lxc.environment", "NVIDIA_DRIVER_CAPABILITIES=compute,utility")
			if err != nil {
				return err
			}
		} else {
			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_DRIVER_CAPABILITIES=%s", nvidiaDriver))
			if err != nil {
				return err
			}
		}

		// Only export the CUDA version constraint when one is actually
		// configured (the previous == "" check exported an empty value and
		// dropped configured ones).
		nvidiaRequireCuda := d.expandedConfig["nvidia.require.cuda"]
		if nvidiaRequireCuda != "" {
			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_CUDA=%s", nvidiaRequireCuda))
			if err != nil {
				return err
			}
		}

		// Same fix for the driver version constraint.
		nvidiaRequireDriver := d.expandedConfig["nvidia.require.driver"]
		if nvidiaRequireDriver != "" {
			err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("NVIDIA_REQUIRE_DRIVER=%s", nvidiaRequireDriver))
			if err != nil {
				return err
			}
		}

		err = lxcSetConfigItem(cc, "lxc.hook.mount", hookPath)
		if err != nil {
			return err
		}
	}

	// Memory limits
	if d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
		memory := d.expandedConfig["limits.memory"]
		memoryEnforce := d.expandedConfig["limits.memory.enforce"]
		memorySwap := d.expandedConfig["limits.memory.swap"]
		memorySwapPriority := d.expandedConfig["limits.memory.swap.priority"]

		// Configure the memory limits
		if memory != "" {
			var valueInt int64
			if strings.HasSuffix(memory, "%") {
				// Percentage limits are relative to total system memory.
				percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
				if err != nil {
					return err
				}

				memoryTotal, err := shared.DeviceTotalMemory()
				if err != nil {
					return err
				}

				valueInt = int64((memoryTotal / 100) * percent)
			} else {
				valueInt, err = units.ParseByteSizeString(memory)
				if err != nil {
					return err
				}
			}

			if memoryEnforce == "soft" {
				err = cg.SetMemorySoftLimit(valueInt)
				if err != nil {
					return err
				}
			} else {
				if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
					err = cg.SetMemoryLimit(valueInt)
					if err != nil {
						return err
					}

					err = cg.SetMemorySwapLimit(0)
					if err != nil {
						return err
					}
				} else {
					err = cg.SetMemoryLimit(valueInt)
					if err != nil {
						return err
					}
				}

				// Set soft limit to value 10% less than hard limit
				err = cg.SetMemorySoftLimit(int64(float64(valueInt) * 0.9))
				if err != nil {
					return err
				}
			}
		}

		if d.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
			// Configure the swappiness
			if memorySwap != "" && !shared.IsTrue(memorySwap) {
				err = cg.SetMemorySwappiness(0)
				if err != nil {
					return err
				}
			} else if memorySwapPriority != "" {
				priority, err := strconv.Atoi(memorySwapPriority)
				if err != nil {
					return err
				}

				err = cg.SetMemorySwappiness(int64(60 - 10 + priority))
				if err != nil {
					return err
				}
			}
		}
	}

	// CPU limits
	cpuPriority := d.expandedConfig["limits.cpu.priority"]
	cpuAllowance := d.expandedConfig["limits.cpu.allowance"]

	if (cpuPriority != "" || cpuAllowance != "") && d.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
		cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(cpuAllowance, cpuPriority)
		if err != nil {
			return err
		}

		if cpuShares != 1024 {
			err = cg.SetCPUShare(cpuShares)
			if err != nil {
				return err
			}
		}

		if cpuCfsPeriod != -1 && cpuCfsQuota != -1 {
			err = cg.SetCPUCfsLimit(cpuCfsPeriod, cpuCfsQuota)
			if err != nil {
				return err
			}
		}
	}

	// Disk priority limits.
	diskPriority := d.ExpandedConfig()["limits.disk.priority"]
	if diskPriority != "" {
		if d.state.OS.CGInfo.Supports(cgroup.BlkioWeight, nil) {
			priorityInt, err := strconv.Atoi(diskPriority)
			if err != nil {
				return err
			}

			priority := priorityInt * 100

			// Minimum valid value is 10
			if priority == 0 {
				priority = 10
			}

			err = cg.SetBlkioWeight(int64(priority))
			if err != nil {
				return err
			}
		} else {
			return fmt.Errorf("Cannot apply limits.disk.priority as blkio.weight cgroup controller is missing")
		}
	}

	// Processes
	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
		processes := d.expandedConfig["limits.processes"]
		if processes != "" {
			valueInt, err := strconv.ParseInt(processes, 10, 64)
			if err != nil {
				return err
			}

			err = cg.SetMaxProcesses(valueInt)
			if err != nil {
				return err
			}
		}
	}

	// Hugepages
	if d.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
		for i, key := range shared.HugePageSizeKeys {
			value := d.expandedConfig[key]
			if value != "" {
				value, err := units.ParseByteSizeString(value)
				if err != nil {
					return err
				}

				err = cg.SetHugepagesLimit(shared.HugePageSizeSuffix[i], value)
				if err != nil {
					return err
				}
			}
		}
	}

	// Setup process limits
	for k, v := range d.expandedConfig {
		if strings.HasPrefix(k, "limits.kernel.") {
			prlimitSuffix := strings.TrimPrefix(k, "limits.kernel.")
			prlimitKey := fmt.Sprintf("lxc.prlimit.%s", prlimitSuffix)
			err = lxcSetConfigItem(cc, prlimitKey, v)
			if err != nil {
				return err
			}
		}
	}

	// Setup shmounts
	if d.state.OS.LXCFeatures["mount_injection_file"] {
		err = lxcSetConfigItem(cc, "lxc.mount.auto", fmt.Sprintf("shmounts:%s:/dev/.lxd-mounts", d.ShmountsPath()))
	} else {
		err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", d.ShmountsPath()))
	}
	if err != nil {
		return err
	}

	// Apply raw.lxc
	if lxcConfig, ok := d.expandedConfig["raw.lxc"]; ok {
		f, err := ioutil.TempFile("", "lxd_config_")
		if err != nil {
			return err
		}

		err = shared.WriteAll(f, []byte(lxcConfig))
		f.Close()
		defer os.Remove(f.Name())
		if err != nil {
			return err
		}

		if err := cc.LoadConfigFile(f.Name()); err != nil {
			return fmt.Errorf("Failed to load raw.lxc")
		}
	}

	if d.c != nil {
		d.c.Release()
	}

	d.c = cc
	freeContainer = false

	return nil
}
// devlxdEventSend forwards an event of the given type to the instance's
// devlxd event stream. The previous implementation built a shared.Jmap
// event envelope that was never used (dead code); the listener is sent the
// raw type and message, as before.
func (d *lxc) devlxdEventSend(eventType string, eventMessage interface{}) error {
	return d.state.DevlxdEvents.Send(strconv.Itoa(d.ID()), eventType, eventMessage)
}
// RegisterDevices calls the Register() function on all of the instance's devices.
// Failures are logged per device and never abort the overall registration pass.
func (d *lxc) RegisterDevices() {
	for _, entry := range d.ExpandedDevices().Sorted() {
		dev, _, err := d.deviceLoad(entry.Name, entry.Config)
		if err == device.ErrUnsupportedDevType {
			// Unsupported device types have nothing to register.
			continue
		}

		if err != nil {
			d.logger.Error("Failed to load device to register", log.Ctx{"err": err, "device": entry.Name})
			continue
		}

		// Check whether device wants to register for any events.
		if err := dev.Register(); err != nil {
			d.logger.Error("Failed to register device", log.Ctx{"err": err, "device": entry.Name})
		}
	}
}
// deviceLoad instantiates and validates a new device and returns it along with enriched config.
func (d *lxc) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
	var configCopy deviceConfig.Device
	var err error

	// NIC and infiniband devices pick up extra fields from volatile config;
	// everything else just gets a plain copy so the device cannot mutate the
	// instance's own config.
	switch rawConfig["type"] {
	case "nic", "infiniband":
		configCopy, err = d.FillNetworkDevice(deviceName, rawConfig)
		if err != nil {
			return nil, nil, err
		}
	default:
		configCopy = rawConfig.Clone()
	}

	dev, err := device.New(d, d.state, deviceName, configCopy, d.deviceVolatileGetFunc(deviceName), d.deviceVolatileSetFunc(deviceName))

	// Return device and config copy even if error occurs as caller may still use device.
	return dev, configCopy, err
}
// deviceAdd loads a new device and calls its Add() function.
func (d *lxc) deviceAdd(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
	dev, _, err := d.deviceLoad(deviceName, rawConfig)
	if err != nil {
		return err
	}

	// A running instance only accepts devices that support hotplugging.
	if !instanceRunning || dev.CanHotPlug() {
		return dev.Add()
	}

	return fmt.Errorf("Device cannot be added when instance is running")
}
// deviceStart loads a new device and calls its Start() function.
//
// When the device returns a RunConfig, mount ownership shifting is performed
// unconditionally, while live attach steps (mounts, cgroup rules, NIC attach,
// post-start hooks) run only when instanceRunning is true. Returns the
// device's RunConfig (which may be nil) for the caller to act on.
func (d *lxc) deviceStart(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) (*deviceConfig.RunConfig, error) {
	logger := logging.AddContext(d.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"]})
	logger.Debug("Starting device")
	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
	if err != nil {
		return nil, err
	}
	// Devices without hotplug support can only be started while the instance is stopped.
	if instanceRunning && !dev.CanHotPlug() {
		return nil, fmt.Errorf("Device cannot be started when instance is running")
	}
	runConf, err := dev.Start()
	if err != nil {
		return nil, err
	}
	// If runConf supplied, perform any container specific setup of device.
	if runConf != nil {
		// Shift device file ownership if needed before mounting into container.
		// This needs to be done whether or not container is running.
		if len(runConf.Mounts) > 0 {
			err := d.deviceStaticShiftMounts(runConf.Mounts)
			if err != nil {
				return nil, err
			}
		}
		// If container is running and then live attach device.
		if instanceRunning {
			// Attach mounts if requested.
			if len(runConf.Mounts) > 0 {
				err = d.deviceHandleMounts(runConf.Mounts)
				if err != nil {
					return nil, err
				}
			}
			// Add cgroup rules if requested.
			if len(runConf.CGroups) > 0 {
				err = d.deviceAddCgroupRules(runConf.CGroups)
				if err != nil {
					return nil, err
				}
			}
			// Attach network interface if requested.
			if len(runConf.NetworkInterface) > 0 {
				err = d.deviceAttachNIC(configCopy, runConf.NetworkInterface)
				if err != nil {
					return nil, err
				}
			}
			// If running, run post start hooks now (if not running LXD will run them
			// once the instance is started).
			err = d.runHooks(runConf.PostHooks)
			if err != nil {
				return nil, err
			}
		}
	}
	return runConf, nil
}
// deviceStaticShiftMounts statically shift device mount files ownership to active idmap if needed.
func (d *lxc) deviceStaticShiftMounts(mounts []deviceConfig.MountEntryItem) error {
	idmapSet, err := d.CurrentIdmap()
	if err != nil {
		return fmt.Errorf("Failed to get idmap for device: %s", err)
	}

	// Nothing to shift when there is no idmap, or when LXD itself runs inside a
	// user namespace (it cannot change file ownership then).
	if idmapSet == nil || d.state.OS.RunningInUserNS {
		return nil
	}

	for _, mount := range mounts {
		// Only statically owner-shifted mounts with a host-side path need work
		// (an empty DevPath denotes an unmount request, which needs no shifting).
		if mount.OwnerShift != deviceConfig.MountOwnerShiftStatic || mount.DevPath == "" {
			continue
		}

		if err := idmapSet.ShiftFile(mount.DevPath); err != nil {
			// uidshift failing is weird, but not a big problem. Log and proceed.
			d.logger.Debug("Failed to uidshift device", log.Ctx{"mountDevPath": mount.DevPath, "err": err})
		}
	}

	return nil
}
// deviceAddCgroupRules live adds cgroup rules to a container.
// Rules targeting the devices cgroup are skipped when the container is not
// currently privileged, LXD itself runs in a user namespace, or the host has
// no devices cgroup controller, since they cannot be applied in those cases.
func (d *lxc) deviceAddCgroupRules(cgroups []deviceConfig.RunConfigItem) error {
	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	for _, rule := range cgroups {
		// Only apply devices cgroup rules if container is running privileged and host has devices cgroup controller.
		if strings.HasPrefix(rule.Key, "devices.") && (!d.isCurrentlyPrivileged() || d.state.OS.RunningInUserNS || !d.state.OS.CGInfo.Supports(cgroup.Devices, cg)) {
			continue
		}

		// Add the new device cgroup rule.
		err := d.CGroupSet(rule.Key, rule.Value)
		if err != nil {
			// Fix: include the failing key and the underlying error, which the
			// previous message silently discarded.
			return fmt.Errorf("Failed to add cgroup rule %q for device: %s", rule.Key, err)
		}
	}

	return nil
}
// deviceAttachNIC live attaches a NIC device to a container.
func (d *lxc) deviceAttachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem) error {
	// The device must tell us which host-side interface to move in.
	var devName string
	for _, item := range netIF {
		if item.Key == "link" {
			devName = item.Value
			break
		}
	}

	if devName == "" {
		return fmt.Errorf("Device didn't provide a link property to use")
	}

	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return err
	}

	// Move the interface into the container under its configured name.
	err := d.c.AttachInterface(devName, configCopy["name"])
	if err != nil {
		return fmt.Errorf("Failed to attach interface: %s to %s: %s", devName, configCopy["name"], err)
	}

	return nil
}
// deviceUpdate loads a new device and calls its Update() function.
func (d *lxc) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, instanceRunning bool) error {
	dev, _, err := d.deviceLoad(deviceName, rawConfig)
	if err != nil {
		return err
	}

	// Delegate the actual reconfiguration to the device implementation.
	return dev.Update(oldDevices, instanceRunning)
}
// deviceStop loads a new device and calls its Stop() function.
// Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
// container's network namespace is unmounted (which is required for NIC device cleanup).
//
// Validation failures from deviceLoad are deliberately tolerated (logged, not
// fatal) as long as a device object was returned, so devices that were valid
// when started can always be stopped after an LXD upgrade tightens validation.
func (d *lxc) deviceStop(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool, stopHookNetnsPath string) error {
	logger := logging.AddContext(d.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"]})
	logger.Debug("Stopping device")
	dev, configCopy, err := d.deviceLoad(deviceName, rawConfig)
	// If deviceLoad fails with unsupported device type then return.
	if err == device.ErrUnsupportedDevType {
		return err
	}
	// If deviceLoad fails for any other reason then just log the error and proceed, as in the
	// scenario that a new version of LXD has additional validation restrictions than older
	// versions we still need to allow previously valid devices to be stopped.
	if err != nil {
		// If there is no device returned, then we cannot proceed, so return as error.
		if dev == nil {
			return fmt.Errorf("Device stop validation failed for %q: %v", deviceName, err)
		}
		logger.Error("Device stop validation failed for", log.Ctx{"err": err})
	}
	// Devices without hotplug support cannot be stopped while the instance is running.
	if instanceRunning && !dev.CanHotPlug() {
		return fmt.Errorf("Device cannot be stopped when instance is running")
	}
	runConf, err := dev.Stop()
	if err != nil {
		return err
	}
	if runConf != nil {
		// If network interface settings returned, then detach NIC from container.
		if len(runConf.NetworkInterface) > 0 {
			err = d.deviceDetachNIC(configCopy, runConf.NetworkInterface, instanceRunning, stopHookNetnsPath)
			if err != nil {
				return err
			}
		}
		// Add cgroup rules if requested and container is running.
		if len(runConf.CGroups) > 0 && instanceRunning {
			err = d.deviceAddCgroupRules(runConf.CGroups)
			if err != nil {
				return err
			}
		}
		// Detach mounts if requested and container is running.
		if len(runConf.Mounts) > 0 && instanceRunning {
			err = d.deviceHandleMounts(runConf.Mounts)
			if err != nil {
				return err
			}
		}
		// Run post stop hooks irrespective of run state of instance.
		err = d.runHooks(runConf.PostHooks)
		if err != nil {
			return err
		}
	}
	return nil
}
// deviceDetachNIC detaches a NIC device from a container.
// Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
// container's network namespace is unmounted (which is required for NIC device cleanup).
func (d *lxc) deviceDetachNIC(configCopy map[string]string, netIF []deviceConfig.RunConfigItem, instanceRunning bool, stopHookNetnsPath string) error {
	// Get requested device name to detach interface back to on the host.
	devName := ""
	for _, dev := range netIF {
		if dev.Key == "link" {
			devName = dev.Value
			break
		}
	}
	if devName == "" {
		return fmt.Errorf("Device didn't provide a link property to use")
	}
	// If container is running, perform live detach of interface back to host.
	if instanceRunning {
		// For some reason, having network config confuses detach, so get our own go-lxc struct.
		cname := project.Instance(d.Project(), d.Name())
		cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
		if err != nil {
			return err
		}
		defer cc.Release()
		// Get interfaces inside container.
		ifaces, err := cc.Interfaces()
		if err != nil {
			return fmt.Errorf("Failed to list network interfaces: %v", err)
		}
		// If interface doesn't exist inside container, cannot proceed.
		if !shared.StringInSlice(configCopy["name"], ifaces) {
			return nil
		}
		err = cc.DetachInterfaceRename(configCopy["name"], devName)
		if err != nil {
			return errors.Wrapf(err, "Failed to detach interface: %q to %q", configCopy["name"], devName)
		}
	} else {
		// Currently liblxc does not move devices back to the host on stop that were added
		// after the container was started. For this reason we utilise the lxc.hook.stop
		// hook so that we can capture the netns path, enter the namespace and move the nics
		// back to the host and rename them if liblxc hasn't already done it.
		// We can only move back devices that have an expected host_name record and where
		// that device doesn't already exist on the host as if a device exists on the host
		// we can't know whether that is because liblxc has moved it back already or whether
		// it is a conflicting device.
		if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", devName)) {
			if stopHookNetnsPath == "" {
				return fmt.Errorf("Cannot detach NIC device %q without stopHookNetnsPath being provided", devName)
			}
			// Enter the saved netns ourselves and move the NIC back to the host.
			err := d.detachInterfaceRename(stopHookNetnsPath, configCopy["name"], devName)
			if err != nil {
				return errors.Wrapf(err, "Failed to detach interface: %q to %q", configCopy["name"], devName)
			}
			d.logger.Debug("Detached NIC device interface", log.Ctx{"name": configCopy["name"], "devName": devName})
		}
	}
	return nil
}
// deviceHandleMounts live attaches or detaches mounts on a container.
// If the mount DevPath is empty the mount action is treated as unmount.
func (d *lxc) deviceHandleMounts(mounts []deviceConfig.MountEntryItem) error {
	for _, mount := range mounts {
		if mount.DevPath != "" {
			flags := 0
			// Convert options into flags.
			for _, opt := range mount.Opts {
				if opt == "bind" {
					flags |= unix.MS_BIND
				} else if opt == "rbind" {
					flags |= unix.MS_BIND | unix.MS_REC
				}
			}
			// Dynamic owner shifting is implemented via shiftfs at mount time.
			shiftfs := false
			if mount.OwnerShift == deviceConfig.MountOwnerShiftDynamic {
				shiftfs = true
			}
			// Mount it into the container.
			err := d.insertMount(mount.DevPath, mount.TargetPath, mount.FSType, flags, shiftfs)
			if err != nil {
				return fmt.Errorf("Failed to add mount for device inside container: %s", err)
			}
		} else {
			// Empty DevPath: unmount the target inside the container.
			relativeTargetPath := strings.TrimPrefix(mount.TargetPath, "/")
			// Only attempt the unmount if the target path exists in the container.
			if d.FileExists(relativeTargetPath) == nil {
				err := d.removeMount(mount.TargetPath)
				if err != nil {
					return fmt.Errorf("Error unmounting the device path inside container: %s", err)
				}
				err = d.FileRemove(relativeTargetPath)
				if err != nil {
					// Only warn here and don't fail as removing a directory
					// mount may fail if there were already files inside
					// directory before it was mounted over preventing delete.
					d.logger.Warn("Could not remove the device path inside container", log.Ctx{"err": err})
				}
			}
		}
	}
	return nil
}
// deviceRemove loads a new device and calls its Remove() function.
// Validation errors from deviceLoad are tolerated (logged only) when a device
// object was still returned, so devices valid under an older LXD can always
// be removed after an upgrade; a nil device is fatal.
func (d *lxc) deviceRemove(deviceName string, rawConfig deviceConfig.Device, instanceRunning bool) error {
	logger := logging.AddContext(d.logger, log.Ctx{"device": deviceName, "type": rawConfig["type"]})

	dev, _, err := d.deviceLoad(deviceName, rawConfig)
	if err == device.ErrUnsupportedDevType {
		// Unsupported device types are propagated unchanged.
		return err
	}

	if err != nil {
		if dev == nil {
			// Without a device object there is nothing to call Remove() on.
			return fmt.Errorf("Device remove validation failed for %q: %v", deviceName, err)
		}

		logger.Error("Device remove validation failed", log.Ctx{"err": err})
	}

	if instanceRunning && !dev.CanHotPlug() {
		return fmt.Errorf("Device cannot be removed when instance is running")
	}

	return dev.Remove()
}
// DeviceEventHandler actions the results of a RunConfig after an event has occurred on a device.
// It is a no-op when the container is not running or runConf is nil.
func (d *lxc) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
	// Device events can only be processed when the container is running.
	if !d.IsRunning() {
		return nil
	}
	if runConf == nil {
		return nil
	}
	// Shift device file ownership if needed before mounting devices into container.
	if len(runConf.Mounts) > 0 {
		err := d.deviceStaticShiftMounts(runConf.Mounts)
		if err != nil {
			return err
		}
		err = d.deviceHandleMounts(runConf.Mounts)
		if err != nil {
			return err
		}
	}
	// Add cgroup rules if requested.
	if len(runConf.CGroups) > 0 {
		err := d.deviceAddCgroupRules(runConf.CGroups)
		if err != nil {
			return err
		}
	}
	// Run any post hooks requested.
	err := d.runHooks(runConf.PostHooks)
	if err != nil {
		return err
	}
	// Generate uevent inside container if requested.
	if len(runConf.Uevents) > 0 {
		pidFdNr, pidFd := d.inheritInitPidFd()
		if pidFdNr >= 0 {
			defer pidFd.Close()
		}
		for _, eventParts := range runConf.Uevents {
			// argv layout for the forkuevent helper:
			// forkuevent -- inject <initPID> <pidFd> <payloadLen> <part>...
			ueventArray := make([]string, 6)
			ueventArray[0] = "forkuevent"
			ueventArray[2] = "inject"
			ueventArray[1] = "--"
			ueventArray[3] = fmt.Sprintf("%d", d.InitPID())
			ueventArray[4] = fmt.Sprintf("%d", pidFdNr)
			// Payload length is the sum of all parts plus one byte per part
			// (presumably a separator/NUL per part — see forkuevent helper).
			length := 0
			for _, part := range eventParts {
				length = length + len(part) + 1
			}
			ueventArray[5] = fmt.Sprintf("%d", length)
			ueventArray = append(ueventArray, eventParts...)
			_, _, err := shared.RunCommandSplit(nil, []*os.File{pidFd}, d.state.OS.ExecPath, ueventArray...)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// Start functions

// startCommon performs all preparation shared by the start paths and returns
// the path of the generated LXC config file plus any post-start hooks that the
// caller must run once the container is up. On error every preparation step
// performed so far is reverted via the revert stack.
func (d *lxc) startCommon() (string, []func() error, error) {
	revert := revert.New()
	defer revert.Fail()
	// Load the go-lxc struct
	err := d.initLXC(true)
	if err != nil {
		return "", nil, errors.Wrap(err, "Load go-lxc struct")
	}
	// Check that we're not already running
	if d.IsRunning() {
		return "", nil, fmt.Errorf("The container is already running")
	}
	// Load any required kernel modules
	kernelModules := d.expandedConfig["linux.kernel_modules"]
	if kernelModules != "" {
		for _, module := range strings.Split(kernelModules, ",") {
			module = strings.TrimPrefix(module, " ")
			err := util.LoadModule(module)
			if err != nil {
				return "", nil, fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
			}
		}
	}
	// Rotate the log file.
	logfile := d.LogFilePath()
	if shared.PathExists(logfile) {
		os.Remove(logfile + ".old")
		err := os.Rename(logfile, logfile+".old")
		if err != nil {
			return "", nil, err
		}
	}
	// Mount instance root volume.
	_, err = d.mount()
	if err != nil {
		return "", nil, err
	}
	revert.Add(func() { d.unmount() })
	/* Deal with idmap changes */
	nextIdmap, err := d.NextIdmap()
	if err != nil {
		return "", nil, errors.Wrap(err, "Set ID map")
	}
	diskIdmap, err := d.DiskIdmap()
	if err != nil {
		return "", nil, errors.Wrap(err, "Set last ID map")
	}
	// Once mounted, check if filesystem needs shifting.
	// Shifting is skipped when the on-disk idmap already matches, or when the
	// disk is unshifted and shiftfs will handle the mapping at mount time.
	if !nextIdmap.Equals(diskIdmap) && !(diskIdmap == nil && d.state.OS.Shiftfs) {
		if shared.IsTrue(d.expandedConfig["security.protection.shift"]) {
			return "", nil, fmt.Errorf("Container is protected against filesystem shifting")
		}
		d.logger.Debug("Container idmap changed, remapping")
		d.updateProgress("Remapping container filesystem")
		storageType, err := d.getStorageType()
		if err != nil {
			return "", nil, errors.Wrap(err, "Storage type")
		}
		// First undo the previous on-disk idmap, if any.
		if diskIdmap != nil {
			if storageType == "zfs" {
				err = diskIdmap.UnshiftRootfs(d.RootfsPath(), storageDrivers.ShiftZFSSkipper)
			} else if storageType == "btrfs" {
				err = storageDrivers.UnshiftBtrfsRootfs(d.RootfsPath(), diskIdmap)
			} else {
				err = diskIdmap.UnshiftRootfs(d.RootfsPath(), nil)
			}
			if err != nil {
				return "", nil, err
			}
		}
		// Then apply the new idmap directly, unless shiftfs will do it at mount time.
		if nextIdmap != nil && !d.state.OS.Shiftfs {
			if storageType == "zfs" {
				err = nextIdmap.ShiftRootfs(d.RootfsPath(), storageDrivers.ShiftZFSSkipper)
			} else if storageType == "btrfs" {
				err = storageDrivers.ShiftBtrfsRootfs(d.RootfsPath(), nextIdmap)
			} else {
				err = nextIdmap.ShiftRootfs(d.RootfsPath(), nil)
			}
			if err != nil {
				return "", nil, err
			}
		}
		// Persist the idmap now present on disk ("[]" when nothing was shifted).
		jsonDiskIdmap := "[]"
		if nextIdmap != nil && !d.state.OS.Shiftfs {
			idmapBytes, err := json.Marshal(nextIdmap.Idmap)
			if err != nil {
				return "", nil, err
			}
			jsonDiskIdmap = string(idmapBytes)
		}
		err = d.VolatileSet(map[string]string{"volatile.last_state.idmap": jsonDiskIdmap})
		if err != nil {
			return "", nil, errors.Wrapf(err, "Set volatile.last_state.idmap config key on container %q (id %d)", d.name, d.id)
		}
		d.updateProgress("")
	}
	// Record the idmap in use for this boot if it changed.
	var idmapBytes []byte
	if nextIdmap == nil {
		idmapBytes = []byte("[]")
	} else {
		idmapBytes, err = json.Marshal(nextIdmap.Idmap)
		if err != nil {
			return "", nil, err
		}
	}
	if d.localConfig["volatile.idmap.current"] != string(idmapBytes) {
		err = d.VolatileSet(map[string]string{"volatile.idmap.current": string(idmapBytes)})
		if err != nil {
			return "", nil, errors.Wrapf(err, "Set volatile.idmap.current config key on container %q (id %d)", d.name, d.id)
		}
	}
	// Generate the Seccomp profile
	if err := seccomp.CreateProfile(d.state, d); err != nil {
		return "", nil, err
	}
	// Cleanup any existing leftover devices
	d.removeUnixDevices()
	d.removeDiskDevices()
	// Create any missing directories.
	err = os.MkdirAll(d.LogPath(), 0700)
	if err != nil {
		return "", nil, err
	}
	err = os.MkdirAll(d.DevicesPath(), 0711)
	if err != nil {
		return "", nil, err
	}
	err = os.MkdirAll(d.ShmountsPath(), 0711)
	if err != nil {
		return "", nil, err
	}
	// Generate UUID if not present.
	instUUID := d.localConfig["volatile.uuid"]
	if instUUID == "" {
		instUUID = uuid.New()
		d.VolatileSet(map[string]string{"volatile.uuid": instUUID})
	}
	// Create the devices
	postStartHooks := []func() error{}
	nicID := -1
	// Setup devices in sorted order, this ensures that device mounts are added in path order.
	for _, entry := range d.expandedDevices.Sorted() {
		dev := entry // Ensure device variable has local scope for revert.
		// Start the device.
		runConf, err := d.deviceStart(dev.Name, dev.Config, false)
		if err != nil {
			return "", nil, errors.Wrapf(err, "Failed to start device %q", dev.Name)
		}
		// Stop device on failure to setup container.
		revert.Add(func() {
			err := d.deviceStop(dev.Name, dev.Config, false, "")
			if err != nil {
				d.logger.Error("Failed to cleanup device", log.Ctx{"devName": dev.Name, "err": err})
			}
		})
		if runConf == nil {
			continue
		}
		// Process rootfs setup.
		if runConf.RootFS.Path != "" {
			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
				// Set the rootfs backend type if supported (must happen before any other lxc.rootfs)
				err := lxcSetConfigItem(d.c, "lxc.rootfs.backend", "dir")
				if err == nil {
					value := d.c.ConfigItem("lxc.rootfs.backend")
					if len(value) == 0 || value[0] != "dir" {
						lxcSetConfigItem(d.c, "lxc.rootfs.backend", "")
					}
				}
			}
			// liblxc >= 2.1 uses the "dir:" prefixed lxc.rootfs.path key.
			if util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
				rootfsPath := fmt.Sprintf("dir:%s", runConf.RootFS.Path)
				err = lxcSetConfigItem(d.c, "lxc.rootfs.path", rootfsPath)
			} else {
				err = lxcSetConfigItem(d.c, "lxc.rootfs", runConf.RootFS.Path)
			}
			if err != nil {
				return "", nil, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
			}
			if len(runConf.RootFS.Opts) > 0 {
				err = lxcSetConfigItem(d.c, "lxc.rootfs.options", strings.Join(runConf.RootFS.Opts, ","))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device rootfs '%s'", dev.Name)
				}
			}
			// Unprivileged with shiftfs and an unshifted disk: set up shiftfs
			// mark/shift mounts via LXC hooks instead of shifting on disk.
			if d.state.OS.Shiftfs && !d.IsPrivileged() && diskIdmap == nil {
				// Host side mark mount.
				err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath())))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
				}
				// Container side shift mount.
				err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(d.RootfsPath()), strconv.Quote(d.RootfsPath())))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
				}
				// Host side umount of mark mount.
				err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(d.RootfsPath())))
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
				}
			}
		}
		// Pass any cgroups rules into LXC.
		if len(runConf.CGroups) > 0 {
			for _, rule := range runConf.CGroups {
				if d.state.OS.CGInfo.Layout == cgroup.CgroupsUnified {
					err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup2.%s", rule.Key), rule.Value)
				} else {
					err = lxcSetConfigItem(d.c, fmt.Sprintf("lxc.cgroup.%s", rule.Key), rule.Value)
				}
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device cgroup '%s'", dev.Name)
				}
			}
		}
		// Pass any mounts into LXC.
		if len(runConf.Mounts) > 0 {
			for _, mount := range runConf.Mounts {
				if shared.StringInSlice("propagation", mount.Opts) && !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
					return "", nil, errors.Wrapf(fmt.Errorf("liblxc 3.0 is required for mount propagation configuration"), "Failed to setup device mount '%s'", dev.Name)
				}
				// Dynamic owner shifting on unprivileged containers requires shiftfs hooks.
				if mount.OwnerShift == deviceConfig.MountOwnerShiftDynamic && !d.IsPrivileged() {
					if !d.state.OS.Shiftfs {
						return "", nil, errors.Wrapf(fmt.Errorf("shiftfs is required but isn't supported on system"), "Failed to setup device mount '%s'", dev.Name)
					}
					err = lxcSetConfigItem(d.c, "lxc.hook.pre-start", fmt.Sprintf("/bin/mount -t shiftfs -o mark,passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
					if err != nil {
						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
					}
					err = lxcSetConfigItem(d.c, "lxc.hook.pre-mount", fmt.Sprintf("/bin/mount -t shiftfs -o passthrough=3 %s %s", strconv.Quote(mount.DevPath), strconv.Quote(mount.DevPath)))
					if err != nil {
						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
					}
					err = lxcSetConfigItem(d.c, "lxc.hook.start-host", fmt.Sprintf("/bin/umount -l %s", strconv.Quote(mount.DevPath)))
					if err != nil {
						return "", nil, errors.Wrapf(err, "Failed to setup device mount shiftfs '%s'", dev.Name)
					}
				}
				// fstab-style entry: dev target fstype opts freq passno.
				mntVal := fmt.Sprintf("%s %s %s %s %d %d", shared.EscapePathFstab(mount.DevPath), shared.EscapePathFstab(mount.TargetPath), mount.FSType, strings.Join(mount.Opts, ","), mount.Freq, mount.PassNo)
				err = lxcSetConfigItem(d.c, "lxc.mount.entry", mntVal)
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device mount '%s'", dev.Name)
				}
			}
		}
		// Pass any network setup config into LXC.
		if len(runConf.NetworkInterface) > 0 {
			// Increment nicID so that LXC network index is unique per device.
			nicID++
			networkKeyPrefix := "lxc.net"
			if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
				networkKeyPrefix = "lxc.network"
			}
			for _, nicItem := range runConf.NetworkInterface {
				err = lxcSetConfigItem(d.c, fmt.Sprintf("%s.%d.%s", networkKeyPrefix, nicID, nicItem.Key), nicItem.Value)
				if err != nil {
					return "", nil, errors.Wrapf(err, "Failed to setup device network interface '%s'", dev.Name)
				}
			}
		}
		// Add any post start hooks.
		if len(runConf.PostHooks) > 0 {
			postStartHooks = append(postStartHooks, runConf.PostHooks...)
		}
	}
	// Generate the LXC config
	configPath := filepath.Join(d.LogPath(), "lxc.conf")
	err = d.c.SaveConfigFile(configPath)
	if err != nil {
		os.Remove(configPath)
		return "", nil, err
	}
	// Set ownership to match container root
	currentIdmapset, err := d.CurrentIdmap()
	if err != nil {
		return "", nil, err
	}
	uid := int64(0)
	if currentIdmapset != nil {
		uid, _ = currentIdmapset.ShiftFromNs(0, 0)
	}
	err = os.Chown(d.Path(), int(uid), 0)
	if err != nil {
		return "", nil, err
	}
	// We only need traversal by root in the container
	err = os.Chmod(d.Path(), 0100)
	if err != nil {
		return "", nil, err
	}
	// Update the backup.yaml file
	err = d.UpdateBackupFile()
	if err != nil {
		return "", nil, err
	}
	// If starting stateless, wipe state
	if !d.IsStateful() && shared.PathExists(d.StatePath()) {
		os.RemoveAll(d.StatePath())
	}
	// Unmount any previously mounted shiftfs
	unix.Unmount(d.RootfsPath(), unix.MNT_DETACH)
	revert.Success()
	return configPath, postStartHooks, nil
}
// detachInterfaceRename enters the container's network namespace and moves the named interface
// in ifName back to the network namespace of the running process as the name specified in hostName.
func (d *lxc) detachInterfaceRename(netns string, ifName string, hostName string) error {
	// The forknet helper needs our own PID so it knows which namespace to move
	// the interface back into.
	_, err := shared.RunCommand(
		d.state.OS.ExecPath,
		"forknet",
		"detach",
		"--",
		netns,
		fmt.Sprintf("%d", os.Getpid()),
		ifName,
		hostName,
	)

	return err
}
// Start starts the instance.
// When stateful is true a CRIU restore is attempted from the saved state
// directory; otherwise any stale saved state is discarded and the container
// is started via the forkstart helper.
func (d *lxc) Start(stateful bool) error {
	var ctxMap log.Ctx
	// Setup a new operation
	exists, op, err := operationlock.CreateWaitGet(d.id, "start", []string{"restart", "restore"}, false, false)
	if err != nil {
		return errors.Wrap(err, "Create container start operation")
	}
	if exists {
		// An existing matching operation has now succeeded, return.
		return nil
	}
	defer op.Done(nil)
	if !daemon.SharedMountsSetup {
		err = fmt.Errorf("Daemon failed to setup shared mounts base. Does security.nesting need to be turned on?")
		op.Done(err)
		return err
	}
	// Run the shared start code
	configPath, postStartHooks, err := d.startCommon()
	if err != nil {
		op.Done(err)
		return errors.Wrap(err, "Failed preparing container for start")
	}
	ctxMap = log.Ctx{
		"action":    op.Action(),
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"stateful":  stateful}
	if op.Action() == "start" {
		d.logger.Info("Starting container", ctxMap)
	}
	// If stateful, restore now
	if stateful {
		if !d.stateful {
			err = fmt.Errorf("Container has no existing state to restore")
			op.Done(err)
			return err
		}
		criuMigrationArgs := instance.CriuMigrationArgs{
			Cmd:          liblxc.MIGRATE_RESTORE,
			StateDir:     d.StatePath(),
			Function:     "snapshot",
			Stop:         false,
			ActionScript: false,
			DumpDir:      "",
			PreDumpDir:   "",
		}
		// A restore error is only fatal if the container did not end up running.
		err := d.Migrate(&criuMigrationArgs)
		if err != nil && !d.IsRunning() {
			op.Done(err)
			return errors.Wrap(err, "Migrate")
		}
		os.RemoveAll(d.StatePath())
		d.stateful = false
		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, false)
		if err != nil {
			op.Done(err)
			return errors.Wrap(err, "Start container")
		}
		// Run any post start hooks.
		err = d.runHooks(postStartHooks)
		if err != nil {
			// Attempt to stop container.
			d.Stop(false)
			op.Done(err)
			return err
		}
		if op.Action() == "start" {
			d.logger.Info("Started container", ctxMap)
			d.lifecycle("started", nil)
		}
		return nil
	} else if d.stateful {
		/* stateless start required when we have state, let's delete it */
		err := os.RemoveAll(d.StatePath())
		if err != nil {
			op.Done(err)
			return err
		}
		d.stateful = false
		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, false)
		if err != nil {
			op.Done(err)
			return errors.Wrap(err, "Persist stateful flag")
		}
	}
	name := project.Instance(d.Project(), d.name)
	// Start the LXC container
	_, err = shared.RunCommand(
		d.state.OS.ExecPath,
		"forkstart",
		name,
		d.state.OS.LxcPath,
		configPath)
	if err != nil && !d.IsRunning() {
		// Attempt to extract the LXC errors
		// NOTE(review): lxcLog is accumulated below but never logged or attached
		// to the returned error in this function — confirm whether it should be.
		lxcLog := ""
		logPath := filepath.Join(d.LogPath(), "lxc.log")
		if shared.PathExists(logPath) {
			logContent, err := ioutil.ReadFile(logPath)
			if err == nil {
				for _, line := range strings.Split(string(logContent), "\n") {
					fields := strings.Fields(line)
					if len(fields) < 4 {
						continue
					}
					// We only care about errors
					if fields[2] != "ERROR" {
						continue
					}
					// Prepend the line break
					if len(lxcLog) == 0 {
						lxcLog += "\n"
					}
					lxcLog += fmt.Sprintf(" %s\n", strings.Join(fields[0:], " "))
				}
			}
		}
		d.logger.Error("Failed starting container", ctxMap)
		// Return the actual error
		op.Done(err)
		return err
	}
	// Run any post start hooks.
	err = d.runHooks(postStartHooks)
	if err != nil {
		// Attempt to stop container.
		d.Stop(false)
		op.Done(err)
		return err
	}
	if op.Action() == "start" {
		d.logger.Info("Started container", ctxMap)
		d.lifecycle("started", nil)
	}
	return nil
}
// OnHook is the top-level hook handler.
// It dispatches to the per-hook handler, returning instance.ErrNotImplemented
// for hook names this driver does not handle.
func (d *lxc) OnHook(hookName string, args map[string]string) error {
	handler, ok := map[string]func(map[string]string) error{
		instance.HookStart:  d.onStart,
		instance.HookStopNS: d.onStopNS,
		instance.HookStop:   d.onStop,
	}[hookName]
	if !ok {
		return instance.ErrNotImplemented
	}

	return handler(args)
}
// onStart implements the start hook.
// It runs inside the LXC start hook context: templates are applied, the
// AppArmor profile is loaded (and unloaded again on every error path), and
// the database power state / last-used records are updated.
func (d *lxc) onStart(_ map[string]string) error {
	// Make sure we can't call go-lxc functions by mistake
	d.fromHook = true
	// Load the container AppArmor profile
	err := apparmor.InstanceLoad(d.state, d)
	if err != nil {
		return err
	}
	// Template anything that needs templating
	key := "volatile.apply_template"
	if d.localConfig[key] != "" {
		// Run any template that needs running
		err = d.templateApplyNow(instance.TemplateTrigger(d.localConfig[key]))
		if err != nil {
			apparmor.InstanceUnload(d.state, d)
			return err
		}
		// Remove the volatile key from the DB
		err := d.state.Cluster.DeleteInstanceConfigKey(d.id, key)
		if err != nil {
			apparmor.InstanceUnload(d.state, d)
			return err
		}
	}
	err = d.templateApplyNow("start")
	if err != nil {
		apparmor.InstanceUnload(d.state, d)
		return err
	}
	// Trigger a rebalance
	cgroup.TaskSchedulerTrigger("container", d.name, "started")
	// Apply network priority
	// Done asynchronously; fromHook is cleared in the goroutine so the
	// go-lxc calls made by setNetworkPriority are permitted there.
	if d.expandedConfig["limits.network.priority"] != "" {
		go func(d *lxc) {
			d.fromHook = false
			err := d.setNetworkPriority()
			if err != nil {
				d.logger.Error("Failed to apply network priority", log.Ctx{"err": err})
			}
		}(d)
	}
	// Database updates
	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		// Record current state
		err = tx.UpdateInstancePowerState(d.id, "RUNNING")
		if err != nil {
			return errors.Wrap(err, "Error updating container state")
		}
		// Update time container last started time
		err = tx.UpdateInstanceLastUsedDate(d.id, time.Now().UTC())
		if err != nil {
			return errors.Wrap(err, "Error updating last used")
		}
		return nil
	})
	if err != nil {
		return err
	}
	return nil
}
// Stop functions

// Stop forcefully stops the instance.
// With stateful set, a CRIU checkpoint is dumped first so the container can
// later be restored; otherwise any stale state directory is removed and the
// container is stopped via liblxc, with fork-bomb mitigation applied first.
func (d *lxc) Stop(stateful bool) error {
	var ctxMap log.Ctx
	// Setup a new operation
	exists, op, err := operationlock.CreateWaitGet(d.id, "stop", []string{"restart", "restore"}, false, true)
	if err != nil {
		return err
	}
	if exists {
		// An existing matching operation has now succeeded, return.
		return nil
	}
	// Check that we're not already stopped
	if !d.IsRunning() {
		err = fmt.Errorf("The container is already stopped")
		op.Done(err)
		return err
	}
	ctxMap = log.Ctx{
		"action":    op.Action(),
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"stateful":  stateful}
	if op.Action() == "stop" {
		d.logger.Info("Stopping container", ctxMap)
	}
	// Handle stateful stop
	if stateful {
		// Cleanup any existing state
		stateDir := d.StatePath()
		os.RemoveAll(stateDir)
		err := os.MkdirAll(stateDir, 0700)
		if err != nil {
			op.Done(err)
			return err
		}
		criuMigrationArgs := instance.CriuMigrationArgs{
			Cmd:          liblxc.MIGRATE_DUMP,
			StateDir:     stateDir,
			Function:     "snapshot",
			Stop:         true,
			ActionScript: false,
			DumpDir:      "",
			PreDumpDir:   "",
		}
		// Checkpoint
		err = d.Migrate(&criuMigrationArgs)
		if err != nil {
			op.Done(err)
			return err
		}
		// A wait error only matters if the container is still running.
		err = op.Wait()
		if err != nil && d.IsRunning() {
			return err
		}
		d.stateful = true
		err = d.state.Cluster.UpdateInstanceStatefulFlag(d.id, true)
		if err != nil {
			d.logger.Error("Failed stopping container", ctxMap)
			return err
		}
		d.logger.Info("Stopped container", ctxMap)
		d.lifecycle("stopped", nil)
		return nil
	} else if shared.PathExists(d.StatePath()) {
		os.RemoveAll(d.StatePath())
	}
	// Load the go-lxc struct
	// raw.lxc requires the full config to be applied to the go-lxc struct.
	if d.expandedConfig["raw.lxc"] != "" {
		err = d.initLXC(true)
		if err != nil {
			op.Done(err)
			return err
		}
	} else {
		err = d.initLXC(false)
		if err != nil {
			op.Done(err)
			return err
		}
	}
	// Load cgroup abstraction
	cg, err := d.cgroup(nil)
	if err != nil {
		op.Done(err)
		return err
	}
	// Fork-bomb mitigation, prevent forking from this point on
	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
		// Attempt to disable forking new processes
		cg.SetMaxProcesses(0)
	} else if d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
		// Attempt to freeze the container
		// Give the freeze up to 5 seconds; unfreeze again if it doesn't complete.
		freezer := make(chan bool, 1)
		go func() {
			d.Freeze()
			freezer <- true
		}()
		select {
		case <-freezer:
		case <-time.After(time.Second * 5):
			d.Unfreeze()
		}
	}
	err = d.c.Stop()
	if err != nil {
		op.Done(err)
		return err
	}
	// A wait error only matters if the container is still running.
	err = op.Wait()
	if err != nil && d.IsRunning() {
		return err
	}
	if op.Action() == "stop" {
		d.logger.Info("Stopped container", ctxMap)
		d.lifecycle("stopped", nil)
	}
	return nil
}
// Shutdown stops the instance.
// Unlike Stop, this requests a clean shutdown from inside the container and
// waits up to timeout for it to complete. A frozen container is unfrozen
// first so the shutdown signal can be handled.
func (d *lxc) Shutdown(timeout time.Duration) error {
	var ctxMap log.Ctx
	// Setup a new operation
	exists, op, err := operationlock.CreateWaitGet(d.id, "stop", []string{"restart"}, true, false)
	if err != nil {
		return err
	}
	if exists {
		// An existing matching operation has now succeeded, return.
		return nil
	}
	// If frozen, resume so the signal can be handled.
	if d.IsFrozen() {
		err := d.Unfreeze()
		if err != nil {
			return err
		}
	}
	// Check that we're not already stopped
	if !d.IsRunning() {
		err = fmt.Errorf("The container is already stopped")
		op.Done(err)
		return err
	}
	ctxMap = log.Ctx{
		"action":    "shutdown",
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"timeout":   timeout}
	if op.Action() == "stop" {
		d.logger.Info("Shutting down container", ctxMap)
	}
	// Load the go-lxc struct
	// raw.lxc requires the full config to be applied to the go-lxc struct.
	if d.expandedConfig["raw.lxc"] != "" {
		err = d.initLXC(true)
		if err != nil {
			op.Done(err)
			return err
		}
	} else {
		err = d.initLXC(false)
		if err != nil {
			op.Done(err)
			return err
		}
	}
	err = d.c.Shutdown(timeout)
	if err != nil {
		op.Done(err)
		return err
	}
	// A wait error only matters if the container is still running.
	err = op.Wait()
	if err != nil && d.IsRunning() {
		return err
	}
	if op.Action() == "stop" {
		d.logger.Info("Shut down container", ctxMap)
		d.lifecycle("shutdown", nil)
	}
	return nil
}
// Restart restart the instance.
func (d *lxc) Restart(timeout time.Duration) error {
	// NOTE(review): the "action" ctx value is "shutdown" here, which looks
	// copied from Shutdown() — confirm whether it should read "restart".
	ctxMap := log.Ctx{
		"action":    "shutdown",
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"timeout":   timeout}

	d.logger.Info("Restarting container", ctxMap)

	// Delegate the actual stop+start sequence to the common helper.
	if err := d.restart(d, timeout); err != nil {
		return err
	}

	d.logger.Info("Restarted container", ctxMap)
	d.lifecycle("restarted", nil)

	return nil
}
// onStopNS is triggered by LXC's stop hook once a container is shutdown but before the container's
// namespaces have been closed. The netns path of the stopped container is provided.
func (d *lxc) onStopNS(args map[string]string) error {
	target, netns := args["target"], args["netns"]

	// Only "stop" and "reboot" are legitimate hook targets.
	switch target {
	case "stop", "reboot":
		// Valid target, continue below.
	default:
		d.logger.Error("Container sent invalid target to OnStopNS", log.Ctx{"target": target})
		return fmt.Errorf("Invalid stop target %q", target)
	}

	// Stop the NIC devices while the network namespace is still mounted.
	d.cleanupDevices(false, netns)

	return nil
}
// onStop is triggered by LXC's post-stop hook once a container is shutdown and after the
// container's namespaces have been closed.
//
// It validates the hook target, records the new power state, then performs
// the remaining cleanup asynchronously in a goroutine (device teardown,
// ownership/permission reset, unmount, apparmor unload) before either
// restarting the container (reboot target) or destroying it (ephemeral).
func (d *lxc) onStop(args map[string]string) error {
	var err error

	target := args["target"]

	// Validate target
	if !shared.StringInSlice(target, []string{"stop", "reboot"}) {
		d.logger.Error("Container sent invalid target to OnStop", log.Ctx{"target": target})
		return fmt.Errorf("Invalid stop target: %s", target)
	}

	// Pick up the existing stop operation lock created in Stop() function.
	op := operationlock.Get(d.id)
	if op != nil && !shared.StringInSlice(op.Action(), []string{"stop", "restart", "restore"}) {
		return fmt.Errorf("Container is already running a %s operation", op.Action())
	}

	// No operation lock means the stop was initiated from inside the
	// container (e.g. the user ran "reboot"); create one for the restart.
	if op == nil && target == "reboot" {
		op, err = operationlock.Create(d.id, "restart", false, false)
		if err != nil {
			return errors.Wrap(err, "Create restart operation")
		}
	}

	// Make sure we can't call go-lxc functions by mistake
	d.fromHook = true

	// Log user actions
	ctxMap := log.Ctx{
		"action":    target,
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"stateful":  false}

	if op == nil {
		d.logger.Debug("Container initiated", ctxMap)
	}

	// Record power state
	err = d.state.Cluster.UpdateInstancePowerState(d.id, "STOPPED")
	if err != nil {
		err = errors.Wrap(err, "Failed to set container state")
		// NOTE(review): op can still be nil here (container-initiated plain
		// "stop") — confirm op.Done tolerates a nil receiver.
		op.Done(err)
		return err
	}

	// Remaining cleanup runs asynchronously so the LXC hook returns quickly.
	go func(d *lxc, target string, op *operationlock.InstanceOperation) {
		d.fromHook = false
		// NOTE(review): this writes the captured outer err; the later
		// `err := os.Chown(...)` shadows it inside the goroutine — confirm
		// the outer assignment is intentional.
		err = nil

		// Unlock on return
		// NOTE(review): op.Done is also called with specific errors below;
		// presumably the first call wins — verify operationlock semantics.
		defer op.Done(nil)

		// Wait for other post-stop actions to be done and the container actually stopping.
		d.IsRunning()
		d.logger.Debug("Container stopped, cleaning up")

		// Clean up devices.
		d.cleanupDevices(false, "")

		// Remove directory ownership (to avoid issue if uidmap is re-used)
		err := os.Chown(d.Path(), 0, 0)
		if err != nil {
			op.Done(errors.Wrap(err, "Failed clearing ownership"))
			return
		}

		// 0100 = execute-only for owner, keeping the path traversable but unreadable.
		err = os.Chmod(d.Path(), 0100)
		if err != nil {
			op.Done(errors.Wrap(err, "Failed clearing permissions"))
			return
		}

		// Stop the storage for this container
		_, err = d.unmount()
		if err != nil {
			op.Done(errors.Wrap(err, "Failed unmounting container"))
			return
		}

		// Unload the apparmor profile
		err = apparmor.InstanceUnload(d.state, d)
		if err != nil {
			op.Done(errors.Wrap(err, "Failed to destroy apparmor namespace"))
			return
		}

		// Clean all the unix devices
		err = d.removeUnixDevices()
		if err != nil {
			op.Done(errors.Wrap(err, "Failed to remove unix devices"))
			return
		}

		// Clean all the disk devices
		err = d.removeDiskDevices()
		if err != nil {
			op.Done(errors.Wrap(err, "Failed to remove disk devices"))
			return
		}

		// Log and emit lifecycle if not user triggered
		if op == nil {
			d.logger.Info("Shut down container", ctxMap)
			d.lifecycle("shutdown", nil)
		}

		// Reboot the container
		if target == "reboot" {
			// Start the container again
			err = d.Start(false)
			if err != nil {
				op.Done(errors.Wrap(err, "Failed restarting container"))
				return
			}

			d.lifecycle("restarted", nil)
			return
		}

		// Trigger a rebalance
		cgroup.TaskSchedulerTrigger("container", d.name, "stopped")

		// Destroy ephemeral containers
		if d.ephemeral {
			err = d.Delete(true)
			if err != nil {
				op.Done(errors.Wrap(err, "Failed deleting ephemeral container"))
				return
			}
		}
	}(d, target, op)

	return nil
}
// cleanupDevices performs any needed device cleanup steps when container is stopped.
// Accepts a stopHookNetnsPath argument which is required when run from the onStopNS hook before the
// container's network namespace is unmounted (which is required for NIC device cleanup).
func (d *lxc) cleanupDevices(instanceRunning bool, stopHookNetnsPath string) {
	fromStopNS := stopHookNetnsPath != ""

	for _, entry := range d.expandedDevices.Reversed() {
		// NIC devices are stopped from the onStopNS hook (netns still
		// mounted); all other devices are stopped from the later onStop
		// hook, so that disk devices outlive the instance itself.
		isNIC := entry.Config["type"] == "nic"
		if fromStopNS != isNIC {
			continue
		}

		// Use the device interface if device supports it.
		err := d.deviceStop(entry.Name, entry.Config, instanceRunning, stopHookNetnsPath)
		if err == device.ErrUnsupportedDevType {
			continue
		} else if err != nil {
			d.logger.Error("Failed to stop device", log.Ctx{"devName": entry.Name, "err": err})
		}
	}
}
// Freeze functions.
func (d *lxc) Freeze() error {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	// Freezing only makes sense on a running container.
	if !d.IsRunning() {
		return fmt.Errorf("The container isn't running")
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// Without freezer CGroup support this is a no-op rather than an error.
	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
		d.logger.Info("Unable to freeze container (lack of kernel support)", ctxMap)
		return nil
	}

	// Refuse to freeze a container twice.
	if d.IsFrozen() {
		return fmt.Errorf("The container is already frozen")
	}

	d.logger.Info("Freezing container", ctxMap)

	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		ctxMap["err"] = err
		d.logger.Error("Failed freezing container", ctxMap)
		return err
	}

	if err := d.c.Freeze(); err != nil {
		ctxMap["err"] = err
		d.logger.Error("Failed freezing container", ctxMap)
		return err
	}

	d.logger.Info("Froze container", ctxMap)
	d.lifecycle("paused", nil)

	return nil
}
// Unfreeze unfreezes the instance.
//
// Symmetric to Freeze: it is a no-op (with a log message) when the kernel
// lacks freezer CGroup support, and an error when the container is not
// running or not currently frozen.
func (d *lxc) Unfreeze() error {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	// Check that we're running
	if !d.IsRunning() {
		return fmt.Errorf("The container isn't running")
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// Check if the CGroup is available
	if !d.state.OS.CGInfo.Supports(cgroup.Freezer, cg) {
		d.logger.Info("Unable to unfreeze container (lack of kernel support)", ctxMap)
		return nil
	}

	// Check that we're frozen
	if !d.IsFrozen() {
		return fmt.Errorf("The container is already running")
	}

	d.logger.Info("Unfreezing container", ctxMap)

	// Load the go-lxc struct
	err = d.initLXC(false)
	if err != nil {
		d.logger.Error("Failed unfreezing container", ctxMap)
		return err
	}

	err = d.c.Unfreeze()
	if err != nil {
		d.logger.Error("Failed unfreezing container", ctxMap)
		// Bug fix: previously this fell through, logging "Unfroze container"
		// and emitting the "resumed" lifecycle event despite the failure.
		// Return early on error, matching Freeze().
		return err
	}

	d.logger.Info("Unfroze container", ctxMap)
	d.lifecycle("resumed", nil)

	return nil
}
// Get lxc container state, with 5 second timeout.
// If we don't get a reply, assume the lxc monitor is hung.
func (d *lxc) getLxcState() (liblxc.State, error) {
	// Snapshots are never running.
	if d.IsSnapshot() {
		return liblxc.StateMap["STOPPED"], nil
	}

	// Load the go-lxc struct
	err := d.initLXC(false)
	if err != nil {
		return liblxc.StateMap["STOPPED"], err
	}

	// No go-lxc handle means we cannot query state; report stopped.
	if d.c == nil {
		return liblxc.StateMap["STOPPED"], nil
	}

	// Query the state from a goroutine so a hung monitor can't block us;
	// buffered channel lets the goroutine complete even after we time out.
	monitor := make(chan liblxc.State, 1)

	go func(c *liblxc.Container) {
		monitor <- c.State()
	}(d.c)

	select {
	case state := <-monitor:
		return state, nil
	case <-time.After(5 * time.Second):
		// Report FROZEN on timeout so the instance isn't treated as stopped.
		return liblxc.StateMap["FROZEN"], fmt.Errorf("Monitor is hung")
	}
}
// Render renders the state of the instance.
//
// Returns an *api.InstanceSnapshot for snapshots or an *api.Instance for
// containers, together with the corresponding ETag contents. Each optional
// functional option is applied to the response before returning.
func (d *lxc) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
	// Ignore err as the arch string on error is correct (unknown)
	architectureName, _ := osarch.ArchitectureName(d.architecture)

	if d.IsSnapshot() {
		// Prepare the ETag
		etag := []interface{}{d.expiryDate}

		snapState := api.InstanceSnapshot{
			CreatedAt:       d.creationDate,
			ExpandedConfig:  d.expandedConfig,
			ExpandedDevices: d.expandedDevices.CloneNative(),
			LastUsedAt:      d.lastUsedDate,
			Name:            strings.SplitN(d.name, "/", 2)[1],
			Stateful:        d.stateful,
			Size:            -1, // Default to uninitialised/error state (0 means no CoW usage).
		}
		snapState.Architecture = architectureName
		snapState.Config = d.localConfig
		snapState.Devices = d.localDevices.CloneNative()
		snapState.Ephemeral = d.ephemeral
		snapState.Profiles = d.profiles
		snapState.ExpiresAt = d.expiryDate

		for _, option := range options {
			err := option(&snapState)
			if err != nil {
				return nil, nil, err
			}
		}

		return &snapState, etag, nil
	}

	// Prepare the ETag
	etag := []interface{}{d.architecture, d.localConfig, d.localDevices, d.ephemeral, d.profiles}

	// FIXME: Render shouldn't directly access the go-lxc struct
	cState, err := d.getLxcState()
	if err != nil {
		// Bug fix: error message previously read "Get container stated".
		return nil, nil, errors.Wrap(err, "Get container state")
	}

	statusCode := lxcStatusCode(cState)

	instState := api.Instance{
		ExpandedConfig:  d.expandedConfig,
		ExpandedDevices: d.expandedDevices.CloneNative(),
		Name:            d.name,
		Status:          statusCode.String(),
		StatusCode:      statusCode,
		Location:        d.node,
		Type:            d.Type().String(),
	}

	instState.Description = d.description
	instState.Architecture = architectureName
	instState.Config = d.localConfig
	instState.CreatedAt = d.creationDate
	instState.Devices = d.localDevices.CloneNative()
	instState.Ephemeral = d.ephemeral
	instState.LastUsedAt = d.lastUsedDate
	instState.Profiles = d.profiles
	instState.Stateful = d.stateful

	for _, option := range options {
		err := option(&instState)
		if err != nil {
			return nil, nil, err
		}
	}

	return &instState, etag, nil
}
// RenderFull renders the full state of the instance.
//
// Builds on Render() and additionally includes the runtime state, the list
// of snapshots and the list of backups. Only valid for containers, not
// snapshots.
func (d *lxc) RenderFull() (*api.InstanceFull, interface{}, error) {
	if d.IsSnapshot() {
		return nil, nil, fmt.Errorf("RenderFull only works with containers")
	}

	// Get the Container struct
	base, etag, err := d.Render()
	if err != nil {
		return nil, nil, err
	}

	// Convert to ContainerFull
	ct := api.InstanceFull{Instance: *base.(*api.Instance)}

	// Add the ContainerState
	ct.State, err = d.RenderState()
	if err != nil {
		return nil, nil, err
	}

	// Add the ContainerSnapshots
	snaps, err := d.Snapshots()
	if err != nil {
		return nil, nil, err
	}

	for _, snap := range snaps {
		render, _, err := snap.Render()
		if err != nil {
			return nil, nil, err
		}

		// Appending to the nil slice allocates as needed; the previous
		// explicit nil-check initialisation was redundant, and the field
		// still stays nil when there are no snapshots.
		ct.Snapshots = append(ct.Snapshots, *render.(*api.InstanceSnapshot))
	}

	// Add the ContainerBackups
	backups, err := d.Backups()
	if err != nil {
		return nil, nil, err
	}

	for _, backup := range backups {
		render := backup.Render()

		// Same as above: append handles the nil slice directly.
		ct.Backups = append(ct.Backups, *render)
	}

	return &ct, etag, nil
}
// RenderState renders just the running state of the instance.
func (d *lxc) RenderState() (*api.InstanceState, error) {
	cState, err := d.getLxcState()
	if err != nil {
		return nil, err
	}

	statusCode := lxcStatusCode(cState)
	status := api.InstanceState{
		Status:     statusCode.String(),
		StatusCode: statusCode,
	}

	// Runtime metrics are only gathered while the container is running.
	if d.IsRunning() {
		status.CPU = d.cpuState()
		status.Memory = d.memoryState()
		status.Network = d.networkState()
		status.Pid = int64(d.InitPID())
		status.Processes = d.processesState()
	}

	// Disk usage is reported regardless of run state.
	status.Disk = d.diskState()

	return &status, nil
}
// Restore restores a snapshot.
//
// The container is stopped if running (temporarily clearing the ephemeral
// flag so the stop doesn't destroy it), the rootfs and configuration are
// restored from sourceContainer, and, when stateful is true, the saved CRIU
// state is resumed. Otherwise the container is restarted if it was running.
func (d *lxc) Restore(sourceContainer instance.Instance, stateful bool) error {
	var ctxMap log.Ctx

	// Serialise with other instance operations.
	op, err := operationlock.Create(d.id, "restore", false, false)
	if err != nil {
		return errors.Wrap(err, "Create restore operation")
	}
	defer op.Done(nil)

	// Stop the container.
	wasRunning := false
	if d.IsRunning() {
		wasRunning = true

		ephemeral := d.IsEphemeral()
		if ephemeral {
			// Unset ephemeral flag so that stopping doesn't delete the container.
			args := db.InstanceArgs{
				Architecture: d.Architecture(),
				Config:       d.LocalConfig(),
				Description:  d.Description(),
				Devices:      d.LocalDevices(),
				Ephemeral:    false,
				Profiles:     d.Profiles(),
				Project:      d.Project(),
				Type:         d.Type(),
				Snapshot:     d.IsSnapshot(),
			}

			err := d.Update(args, false)
			if err != nil {
				op.Done(err)
				return err
			}

			// On function return, set the flag back on.
			defer func() {
				args.Ephemeral = ephemeral
				d.Update(args, false)
			}()
		}

		// This will unmount the container storage.
		err := d.Stop(false)
		if err != nil {
			op.Done(err)
			return err
		}

		// Refresh the operation as that one is now complete.
		op, err = operationlock.Create(d.id, "restore", false, false)
		if err != nil {
			return errors.Wrap(err, "Create restore operation")
		}
		defer op.Done(nil)
	}

	ctxMap = log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"source":    sourceContainer.Name()}

	d.logger.Info("Restoring container", ctxMap)

	// Initialize storage interface for the container and mount the rootfs for criu state check.
	pool, err := storagePools.GetPoolByInstance(d.state, d)
	if err != nil {
		op.Done(err)
		return err
	}

	d.logger.Debug("Mounting instance to check for CRIU state path existence")

	// Ensure that storage is mounted for state path checks and for backup.yaml updates.
	_, err = pool.MountInstance(d, nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Check for CRIU if necessary, before doing a bunch of filesystem manipulations.
	// Requires container be mounted to check StatePath exists.
	if shared.PathExists(d.StatePath()) {
		_, err := exec.LookPath("criu")
		if err != nil {
			err = fmt.Errorf("Failed to restore container state. CRIU isn't installed")
			op.Done(err)
			return err
		}
	}

	_, err = pool.UnmountInstance(d, nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Restore the rootfs.
	err = pool.RestoreInstanceSnapshot(d, sourceContainer, nil)
	if err != nil {
		op.Done(err)
		return err
	}

	// Restore the configuration.
	args := db.InstanceArgs{
		Architecture: sourceContainer.Architecture(),
		Config:       sourceContainer.LocalConfig(),
		Description:  sourceContainer.Description(),
		Devices:      sourceContainer.LocalDevices(),
		Ephemeral:    sourceContainer.IsEphemeral(),
		Profiles:     sourceContainer.Profiles(),
		Project:      sourceContainer.Project(),
		Type:         sourceContainer.Type(),
		Snapshot:     sourceContainer.IsSnapshot(),
	}

	// Don't pass as user-requested as there's no way to fix a bad config.
	// This will call d.UpdateBackupFile() to ensure snapshot list is up to date.
	err = d.Update(args, false)
	if err != nil {
		op.Done(err)
		return err
	}

	// If the container wasn't running but was stateful, should we restore it as running?
	if stateful {
		if !shared.PathExists(d.StatePath()) {
			err = fmt.Errorf("Stateful snapshot restore requested but snapshot is stateless")
			op.Done(err)
			return err
		}

		d.logger.Debug("Performing stateful restore", ctxMap)
		d.stateful = true

		criuMigrationArgs := instance.CriuMigrationArgs{
			Cmd:          liblxc.MIGRATE_RESTORE,
			StateDir:     d.StatePath(),
			Function:     "snapshot",
			Stop:         false,
			ActionScript: false,
			DumpDir:      "",
			PreDumpDir:   "",
		}

		// Checkpoint.
		err := d.Migrate(&criuMigrationArgs)
		if err != nil {
			op.Done(err)
			return err
		}

		// Remove the state from the parent container; we only keep this in snapshots.
		// Bug fix: this previously tested os.IsNotExist against the stale (nil)
		// Migrate error and returned that nil error, silently swallowing any
		// RemoveAll failure. Check and propagate the RemoveAll error itself.
		err2 := os.RemoveAll(d.StatePath())
		if err2 != nil && !os.IsNotExist(err2) {
			op.Done(err2)
			return err2
		}

		d.logger.Debug("Performed stateful restore", ctxMap)
		d.logger.Info("Restored container", ctxMap)
		// NOTE(review): unlike the stateless path below, no "restored"
		// lifecycle event is emitted here — confirm whether intentional.
		return nil
	}

	// Restart the container.
	if wasRunning {
		d.logger.Debug("Starting instance after snapshot restore")
		err = d.Start(false)
		if err != nil {
			op.Done(err)
			return err
		}
	}

	d.lifecycle("restored", map[string]interface{}{"snapshot": sourceContainer.Name()})
	d.logger.Info("Restored container", ctxMap)

	return nil
}
// cleanup performs best-effort removal of the container's leftover on-disk
// artifacts: device nodes, security profiles and the devices/shmounts paths.
// All errors are intentionally ignored as this is called from teardown paths.
func (d *lxc) cleanup() {
	// Unmount any leftovers
	d.removeUnixDevices()
	d.removeDiskDevices()

	// Remove the security profiles
	apparmor.InstanceDelete(d.state, d)
	seccomp.DeleteProfile(d)

	// Remove the devices path
	os.Remove(d.DevicesPath())

	// Remove the shmounts path
	os.RemoveAll(d.ShmountsPath())
}
// Delete deletes the instance.
//
// When force is false, deletion is refused for containers carrying the
// "security.protection.delete" flag. For snapshots only the snapshot volume
// and DB record are removed (and the parent's backup file refreshed); for
// containers all snapshots, backups, storage volumes, MAAS records and
// devices are removed as well. During "lxd import" disaster recovery
// (marker file present) storage volumes are deliberately left untouched.
func (d *lxc) Delete(force bool) error {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	d.logger.Info("Deleting container", ctxMap)

	// Delete protection only applies to containers, not snapshots.
	if !force && shared.IsTrue(d.expandedConfig["security.protection.delete"]) && !d.IsSnapshot() {
		err := fmt.Errorf("Container is protected")
		d.logger.Warn("Failed to delete container", log.Ctx{"err": err})
		return err
	}

	isImport := false

	pool, err := storagePools.GetPoolByInstance(d.state, d)
	// A missing pool record is tolerated so the DB entry can still be removed.
	if err != nil && err != db.ErrNoSuchObject {
		return err
	} else if pool != nil {
		// Check if we're dealing with "lxd import".
		// "lxd import" is used for disaster recovery, where you already have a container
		// and snapshots on disk but no DB entry. As such if something has gone wrong during
		// the creation of the instance and we are now being asked to delete the instance,
		// we should not remove the storage volumes themselves as this would cause data loss.
		cName, _, _ := shared.InstanceGetParentAndSnapshotName(d.Name())
		importingFilePath := storagePools.InstanceImportingFilePath(d.Type(), pool.Name(), d.Project(), cName)
		if shared.PathExists(importingFilePath) {
			isImport = true
		}

		if d.IsSnapshot() {
			if !isImport {
				// Remove snapshot volume and database record.
				err = pool.DeleteInstanceSnapshot(d, nil)
				if err != nil {
					return err
				}
			}
		} else {
			// Remove all snapshots by initialising each snapshot as an Instance and
			// calling its Delete function.
			err := instance.DeleteSnapshots(d.state, d.Project(), d.Name())
			if err != nil {
				d.logger.Error("Failed to delete instance snapshots", log.Ctx{"err": err})
				return err
			}

			if !isImport {
				// Remove the storage volume, snapshot volumes and database records.
				err = pool.DeleteInstance(d, nil)
				if err != nil {
					return err
				}
			}
		}
	}

	// Perform other cleanup steps if not snapshot.
	if !d.IsSnapshot() {
		// Remove all backups.
		backups, err := d.Backups()
		if err != nil {
			return err
		}

		for _, backup := range backups {
			err = backup.Delete()
			if err != nil {
				return err
			}
		}

		// Delete the MAAS entry.
		err = d.maasDelete()
		if err != nil {
			d.logger.Error("Failed deleting container MAAS record", log.Ctx{"err": err})
			return err
		}

		// Remove devices from container.
		for k, m := range d.expandedDevices {
			err = d.deviceRemove(k, m, false)
			// Unsupported device types are skipped rather than treated as failures.
			if err != nil && err != device.ErrUnsupportedDevType {
				return errors.Wrapf(err, "Failed to remove device %q", k)
			}
		}

		// Clean things up.
		d.cleanup()
	}

	// Remove the database record of the instance or snapshot instance.
	if err := d.state.Cluster.DeleteInstance(d.project, d.Name()); err != nil {
		d.logger.Error("Failed deleting container entry", log.Ctx{"err": err})
		return err
	}

	// If dealing with a snapshot, refresh the backup file on the parent.
	if d.IsSnapshot() && !isImport {
		parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)

		// Load the parent.
		parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
		if err != nil {
			return errors.Wrap(err, "Invalid parent")
		}

		// Update the backup file.
		err = parent.UpdateBackupFile()
		if err != nil {
			return err
		}
	}

	d.logger.Info("Deleted container", ctxMap)
	d.lifecycle("deleted", nil)

	return nil
}
// Rename renames the instance. Accepts an argument to enable applying deferred TemplateTriggerRename.
//
// Renames the storage volume (or snapshot volume), all snapshot DB records,
// the instance DB record, log path, MAAS entry and backups, then updates the
// in-memory name, DNS lease files and the backup.yaml file.
func (d *lxc) Rename(newName string, applyTemplateTrigger bool) error {
	oldName := d.Name()
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate,
		"newname":   newName}

	d.logger.Info("Renaming container", ctxMap)

	// Sanity checks.
	err := instance.ValidName(newName, d.IsSnapshot())
	if err != nil {
		return err
	}

	if d.IsRunning() {
		return fmt.Errorf("Renaming of running container not allowed")
	}

	// Clean things up.
	d.cleanup()

	pool, err := storagePools.GetPoolByInstance(d.state, d)
	if err != nil {
		return errors.Wrap(err, "Load instance storage pool")
	}

	if d.IsSnapshot() {
		// Only the snapshot suffix of the new name is used for the volume rename.
		_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
		err = pool.RenameInstanceSnapshot(d, newSnapName, nil)
		if err != nil {
			return errors.Wrap(err, "Rename instance snapshot")
		}
	} else {
		err = pool.RenameInstance(d, newName, nil)
		if err != nil {
			return errors.Wrap(err, "Rename instance")
		}

		if applyTemplateTrigger {
			err = d.DeferTemplateApply(instance.TemplateTriggerRename)
			if err != nil {
				return err
			}
		}
	}

	if !d.IsSnapshot() {
		// Rename all the instance snapshot database entries.
		results, err := d.state.Cluster.GetInstanceSnapshotsNames(d.project, oldName)
		if err != nil {
			d.logger.Error("Failed to get container snapshots", ctxMap)
			return errors.Wrapf(err, "Failed to get container snapshots")
		}

		for _, sname := range results {
			// Rename the snapshot.
			oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
			baseSnapName := filepath.Base(sname)
			err := d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
				return tx.RenameInstanceSnapshot(d.project, oldName, oldSnapName, baseSnapName)
			})
			if err != nil {
				d.logger.Error("Failed renaming snapshot", ctxMap)
				return errors.Wrapf(err, "Failed renaming snapshot")
			}
		}
	}

	// Rename the instance database entry.
	err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		if d.IsSnapshot() {
			oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
			newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
			return tx.RenameInstanceSnapshot(d.project, oldParts[0], oldParts[1], newParts[1])
		}

		return tx.RenameInstance(d.project, oldName, newName)
	})
	if err != nil {
		d.logger.Error("Failed renaming container", ctxMap)
		return errors.Wrapf(err, "Failed renaming container")
	}

	// Rename the logging path.
	newFullName := project.Instance(d.Project(), d.Name())
	os.RemoveAll(shared.LogPath(newFullName))
	if shared.PathExists(d.LogPath()) {
		err := os.Rename(d.LogPath(), shared.LogPath(newFullName))
		if err != nil {
			d.logger.Error("Failed renaming container", ctxMap)
			return errors.Wrapf(err, "Failed renaming container")
		}
	}

	// Rename the MAAS entry.
	if !d.IsSnapshot() {
		err = d.maasRename(newName)
		if err != nil {
			return err
		}
	}

	// NOTE(review): the revert only covers the in-memory name and backup
	// renames below; earlier storage/DB renames are not reverted on failure.
	revert := revert.New()
	defer revert.Fail()

	// Set the new name in the struct.
	d.name = newName
	revert.Add(func() { d.name = oldName })

	// Rename the backups.
	backups, err := d.Backups()
	if err != nil {
		return err
	}

	for _, backup := range backups {
		// Copy the loop variable so the revert closure captures this backup.
		b := backup
		oldName := b.Name()
		backupName := strings.Split(oldName, "/")[1]
		newName := fmt.Sprintf("%s/%s", newName, backupName)

		err = b.Rename(newName)
		if err != nil {
			return err
		}

		revert.Add(func() { b.Rename(oldName) })
	}

	// Invalidate the go-lxc cache.
	if d.c != nil {
		d.c.Release()
		d.c = nil
	}

	d.cConfig = false

	// Update lease files.
	network.UpdateDNSMasqStatic(d.state, "")

	err = d.UpdateBackupFile()
	if err != nil {
		return err
	}

	d.logger.Info("Renamed container", ctxMap)
	d.lifecycle("renamed", map[string]interface{}{"old_name": oldName})

	revert.Success()
	return nil
}
// CGroupSet sets a cgroup value for the instance.
func (d *lxc) CGroupSet(key string, value string) error {
	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		return err
	}

	// Setting cgroup keys is only possible on a running container.
	if !d.IsRunning() {
		return fmt.Errorf("Can't set cgroups on a stopped container")
	}

	err := d.c.SetCgroupItem(key, value)
	if err != nil {
		return fmt.Errorf("Failed to set cgroup %s=\"%s\": %s", key, value, err)
	}

	return nil
}
// Update applies updated config.
func (d *lxc) Update(args db.InstanceArgs, userRequested bool) error {
// Set sane defaults for unset keys
if args.Project == "" {
args.Project = project.Default
}
if args.Architecture == 0 {
args.Architecture = d.architecture
}
if args.Config == nil {
args.Config = map[string]string{}
}
if args.Devices == nil {
args.Devices = deviceConfig.Devices{}
}
if args.Profiles == nil {
args.Profiles = []string{}
}
if userRequested {
// Validate the new config
err := instance.ValidConfig(d.state.OS, args.Config, false, false)
if err != nil {
return errors.Wrap(err, "Invalid config")
}
// Validate the new devices without using expanded devices validation (expensive checks disabled).
err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), args.Devices, false)
if err != nil {
return errors.Wrap(err, "Invalid devices")
}
}
// Validate the new profiles
profiles, err := d.state.Cluster.GetProfileNames(args.Project)
if err != nil {
return errors.Wrap(err, "Failed to get profiles")
}
checkedProfiles := []string{}
for _, profile := range args.Profiles {
if !shared.StringInSlice(profile, profiles) {
return fmt.Errorf("Requested profile '%s' doesn't exist", profile)
}
if shared.StringInSlice(profile, checkedProfiles) {
return fmt.Errorf("Duplicate profile found in request")
}
checkedProfiles = append(checkedProfiles, profile)
}
// Validate the new architecture
if args.Architecture != 0 {
_, err = osarch.ArchitectureName(args.Architecture)
if err != nil {
return fmt.Errorf("Invalid architecture id: %s", err)
}
}
// Get a copy of the old configuration
oldDescription := d.Description()
oldArchitecture := 0
err = shared.DeepCopy(&d.architecture, &oldArchitecture)
if err != nil {
return err
}
oldEphemeral := false
err = shared.DeepCopy(&d.ephemeral, &oldEphemeral)
if err != nil {
return err
}
oldExpandedDevices := deviceConfig.Devices{}
err = shared.DeepCopy(&d.expandedDevices, &oldExpandedDevices)
if err != nil {
return err
}
oldExpandedConfig := map[string]string{}
err = shared.DeepCopy(&d.expandedConfig, &oldExpandedConfig)
if err != nil {
return err
}
oldLocalDevices := deviceConfig.Devices{}
err = shared.DeepCopy(&d.localDevices, &oldLocalDevices)
if err != nil {
return err
}
oldLocalConfig := map[string]string{}
err = shared.DeepCopy(&d.localConfig, &oldLocalConfig)
if err != nil {
return err
}
oldProfiles := []string{}
err = shared.DeepCopy(&d.profiles, &oldProfiles)
if err != nil {
return err
}
oldExpiryDate := d.expiryDate
// Define a function which reverts everything. Defer this function
// so that it doesn't need to be explicitly called in every failing
// return path. Track whether or not we want to undo the changes
// using a closure.
undoChanges := true
defer func() {
if undoChanges {
d.description = oldDescription
d.architecture = oldArchitecture
d.ephemeral = oldEphemeral
d.expandedConfig = oldExpandedConfig
d.expandedDevices = oldExpandedDevices
d.localConfig = oldLocalConfig
d.localDevices = oldLocalDevices
d.profiles = oldProfiles
d.expiryDate = oldExpiryDate
if d.c != nil {
d.c.Release()
d.c = nil
}
d.cConfig = false
d.initLXC(true)
cgroup.TaskSchedulerTrigger("container", d.name, "changed")
}
}()
// Apply the various changes
d.description = args.Description
d.architecture = args.Architecture
d.ephemeral = args.Ephemeral
d.localConfig = args.Config
d.localDevices = args.Devices
d.profiles = args.Profiles
d.expiryDate = args.ExpiryDate
// Expand the config and refresh the LXC config
err = d.expandConfig(nil)
if err != nil {
return errors.Wrap(err, "Expand config")
}
err = d.expandDevices(nil)
if err != nil {
return errors.Wrap(err, "Expand devices")
}
// Diff the configurations
changedConfig := []string{}
for key := range oldExpandedConfig {
if oldExpandedConfig[key] != d.expandedConfig[key] {
if !shared.StringInSlice(key, changedConfig) {
changedConfig = append(changedConfig, key)
}
}
}
for key := range d.expandedConfig {
if oldExpandedConfig[key] != d.expandedConfig[key] {
if !shared.StringInSlice(key, changedConfig) {
changedConfig = append(changedConfig, key)
}
}
}
// Diff the devices
removeDevices, addDevices, updateDevices, allUpdatedKeys := oldExpandedDevices.Update(d.expandedDevices, func(oldDevice deviceConfig.Device, newDevice deviceConfig.Device) []string {
// This function needs to return a list of fields that are excluded from differences
// between oldDevice and newDevice. The result of this is that as long as the
// devices are otherwise identical except for the fields returned here, then the
// device is considered to be being "updated" rather than "added & removed".
oldDevType, err := device.LoadByType(d.state, d.Project(), oldDevice)
if err != nil {
return []string{} // Couldn't create Device, so this cannot be an update.
}
newDevType, err := device.LoadByType(d.state, d.Project(), newDevice)
if err != nil {
return []string{} // Couldn't create Device, so this cannot be an update.
}
return newDevType.UpdatableFields(oldDevType)
})
if userRequested {
// Do some validation of the config diff
err = instance.ValidConfig(d.state.OS, d.expandedConfig, false, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded config")
}
// Do full expanded validation of the devices diff.
err = instance.ValidDevices(d.state, d.state.Cluster, d.Project(), d.Type(), d.expandedDevices, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded devices")
}
}
// Run through initLXC to catch anything we missed
if userRequested {
if d.c != nil {
d.c.Release()
d.c = nil
}
d.cConfig = false
err = d.initLXC(true)
if err != nil {
return errors.Wrap(err, "Initialize LXC")
}
}
cg, err := d.cgroup(nil)
if err != nil {
return err
}
// If apparmor changed, re-validate the apparmor profile
if shared.StringInSlice("raw.apparmor", changedConfig) || shared.StringInSlice("security.nesting", changedConfig) {
err = apparmor.InstanceParse(d.state, d)
if err != nil {
return errors.Wrap(err, "Parse AppArmor profile")
}
}
if shared.StringInSlice("security.idmap.isolated", changedConfig) || shared.StringInSlice("security.idmap.base", changedConfig) || shared.StringInSlice("security.idmap.size", changedConfig) || shared.StringInSlice("raw.idmap", changedConfig) || shared.StringInSlice("security.privileged", changedConfig) {
var idmap *idmap.IdmapSet
base := int64(0)
if !d.IsPrivileged() {
// update the idmap
idmap, base, err = findIdmap(
d.state,
d.Name(),
d.expandedConfig["security.idmap.isolated"],
d.expandedConfig["security.idmap.base"],
d.expandedConfig["security.idmap.size"],
d.expandedConfig["raw.idmap"],
)
if err != nil {
return errors.Wrap(err, "Failed to get ID map")
}
}
var jsonIdmap string
if idmap != nil {
idmapBytes, err := json.Marshal(idmap.Idmap)
if err != nil {
return err
}
jsonIdmap = string(idmapBytes)
} else {
jsonIdmap = "[]"
}
d.localConfig["volatile.idmap.next"] = jsonIdmap
d.localConfig["volatile.idmap.base"] = fmt.Sprintf("%v", base)
// Invalid idmap cache
d.idmapset = nil
}
isRunning := d.IsRunning()
// Use the device interface to apply update changes.
err = d.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices, isRunning, userRequested)
if err != nil {
return err
}
// Update MAAS (must run after the MAC addresses have been generated).
updateMAAS := false
for _, key := range []string{"maas.subnet.ipv4", "maas.subnet.ipv6", "ipv4.address", "ipv6.address"} {
if shared.StringInSlice(key, allUpdatedKeys) {
updateMAAS = true
break
}
}
if !d.IsSnapshot() && updateMAAS {
err = d.maasUpdate(oldExpandedDevices.CloneNative())
if err != nil {
return err
}
}
// Apply the live changes
if isRunning {
// Live update the container config
for _, key := range changedConfig {
value := d.expandedConfig[key]
if key == "raw.apparmor" || key == "security.nesting" {
// Update the AppArmor profile
err = apparmor.InstanceLoad(d.state, d)
if err != nil {
return err
}
} else if key == "security.devlxd" {
if value == "" || shared.IsTrue(value) {
err = d.insertMount(shared.VarPath("devlxd"), "/dev/lxd", "none", unix.MS_BIND, false)
if err != nil {
return err
}
} else if d.FileExists("/dev/lxd") == nil {
err = d.removeMount("/dev/lxd")
if err != nil {
return err
}
err = d.FileRemove("/dev/lxd")
if err != nil {
return err
}
}
} else if key == "linux.kernel_modules" && value != "" {
for _, module := range strings.Split(value, ",") {
module = strings.TrimPrefix(module, " ")
err := util.LoadModule(module)
if err != nil {
return fmt.Errorf("Failed to load kernel module '%s': %s", module, err)
}
}
} else if key == "limits.disk.priority" {
if !d.state.OS.CGInfo.Supports(cgroup.Blkio, cg) {
continue
}
priorityInt := 5
diskPriority := d.expandedConfig["limits.disk.priority"]
if diskPriority != "" {
priorityInt, err = strconv.Atoi(diskPriority)
if err != nil {
return err
}
}
// Minimum valid value is 10
priority := int64(priorityInt * 100)
if priority == 0 {
priority = 10
}
cg.SetBlkioWeight(priority)
if err != nil {
return err
}
} else if key == "limits.memory" || strings.HasPrefix(key, "limits.memory.") {
// Skip if no memory CGroup
if !d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
continue
}
// Set the new memory limit
memory := d.expandedConfig["limits.memory"]
memoryEnforce := d.expandedConfig["limits.memory.enforce"]
memorySwap := d.expandedConfig["limits.memory.swap"]
var memoryInt int64
// Parse memory
if memory == "" {
memoryInt = -1
} else if strings.HasSuffix(memory, "%") {
percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64)
if err != nil {
return err
}
memoryTotal, err := shared.DeviceTotalMemory()
if err != nil {
return err
}
memoryInt = int64((memoryTotal / 100) * percent)
} else {
memoryInt, err = units.ParseByteSizeString(memory)
if err != nil {
return err
}
}
// Store the old values for revert
oldMemswLimit := int64(-1)
if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
oldMemswLimit, err = cg.GetMemorySwapLimit()
if err != nil {
oldMemswLimit = -1
}
}
oldLimit, err := cg.GetMemoryLimit()
if err != nil {
oldLimit = -1
}
oldSoftLimit, err := cg.GetMemorySoftLimit()
if err != nil {
oldSoftLimit = -1
}
revertMemory := func() {
if oldSoftLimit != -1 {
cg.SetMemorySoftLimit(oldSoftLimit)
}
if oldLimit != -1 {
cg.SetMemoryLimit(oldLimit)
}
if oldMemswLimit != -1 {
cg.SetMemorySwapLimit(oldMemswLimit)
}
}
// Reset everything
if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) {
err = cg.SetMemorySwapLimit(-1)
if err != nil {
revertMemory()
return err
}
}
err = cg.SetMemoryLimit(-1)
if err != nil {
revertMemory()
return err
}
err = cg.SetMemorySoftLimit(-1)
if err != nil {
revertMemory()
return err
}
// Set the new values
if memoryEnforce == "soft" {
// Set new limit
err = cg.SetMemorySoftLimit(memoryInt)
if err != nil {
revertMemory()
return err
}
} else {
if d.state.OS.CGInfo.Supports(cgroup.MemorySwap, cg) && (memorySwap == "" || shared.IsTrue(memorySwap)) {
err = cg.SetMemoryLimit(memoryInt)
if err != nil {
revertMemory()
return err
}
err = cg.SetMemorySwapLimit(0)
if err != nil {
revertMemory()
return err
}
} else {
err = cg.SetMemoryLimit(memoryInt)
if err != nil {
revertMemory()
return err
}
}
// Set soft limit to value 10% less than hard limit
err = cg.SetMemorySoftLimit(int64(float64(memoryInt) * 0.9))
if err != nil {
revertMemory()
return err
}
}
if !d.state.OS.CGInfo.Supports(cgroup.MemorySwappiness, cg) {
continue
}
// Configure the swappiness
if key == "limits.memory.swap" || key == "limits.memory.swap.priority" {
memorySwap := d.expandedConfig["limits.memory.swap"]
memorySwapPriority := d.expandedConfig["limits.memory.swap.priority"]
if memorySwap != "" && !shared.IsTrue(memorySwap) {
err = cg.SetMemorySwappiness(0)
if err != nil {
return err
}
} else {
priority := 0
if memorySwapPriority != "" {
priority, err = strconv.Atoi(memorySwapPriority)
if err != nil {
return err
}
}
err = cg.SetMemorySwappiness(int64(60 - 10 + priority))
if err != nil {
return err
}
}
}
} else if key == "limits.network.priority" {
err := d.setNetworkPriority()
if err != nil {
return err
}
} else if key == "limits.cpu" {
// Trigger a scheduler re-run
cgroup.TaskSchedulerTrigger("container", d.name, "changed")
} else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" {
// Skip if no cpu CGroup
if !d.state.OS.CGInfo.Supports(cgroup.CPU, cg) {
continue
}
// Apply new CPU limits
cpuShares, cpuCfsQuota, cpuCfsPeriod, err := cgroup.ParseCPU(d.expandedConfig["limits.cpu.allowance"], d.expandedConfig["limits.cpu.priority"])
if err != nil {
return err
}
err = cg.SetCPUShare(cpuShares)
if err != nil {
return err
}
err = cg.SetCPUCfsLimit(cpuCfsPeriod, cpuCfsQuota)
if err != nil {
return err
}
} else if key == "limits.processes" {
if !d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
continue
}
if value == "" {
err = cg.SetMaxProcesses(-1)
if err != nil {
return err
}
} else {
valueInt, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return err
}
err = cg.SetMaxProcesses(valueInt)
if err != nil {
return err
}
}
} else if strings.HasPrefix(key, "limits.hugepages.") {
if !d.state.OS.CGInfo.Supports(cgroup.Hugetlb, cg) {
continue
}
pageType := ""
switch key {
case "limits.hugepages.64KB":
pageType = "64KB"
case "limits.hugepages.1MB":
pageType = "1MB"
case "limits.hugepages.2MB":
pageType = "2MB"
case "limits.hugepages.1GB":
pageType = "1GB"
}
valueInt := int64(-1)
if value != "" {
valueInt, err = units.ParseByteSizeString(value)
if err != nil {
return err
}
}
err = cg.SetHugepagesLimit(pageType, valueInt)
if err != nil {
return err
}
}
}
}
// Finally, apply the changes to the database
err = d.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
// Snapshots should update only their descriptions and expiry date.
if d.IsSnapshot() {
return tx.UpdateInstanceSnapshot(d.id, d.description, d.expiryDate)
}
object, err := tx.GetInstance(d.project, d.name)
if err != nil {
return err
}
object.Description = d.description
object.Architecture = d.architecture
object.Ephemeral = d.ephemeral
object.ExpiryDate = d.expiryDate
object.Config = d.localConfig
object.Profiles = d.profiles
object.Devices = d.localDevices.CloneNative()
return tx.UpdateInstance(d.project, d.name, *object)
})
if err != nil {
return errors.Wrap(err, "Failed to update database")
}
err = d.UpdateBackupFile()
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "Failed to write backup file")
}
// Send devlxd notifications
if isRunning {
// Config changes (only for user.* keys
for _, key := range changedConfig {
if !strings.HasPrefix(key, "user.") {
continue
}
msg := map[string]string{
"key": key,
"old_value": oldExpandedConfig[key],
"value": d.expandedConfig[key],
}
err = d.devlxdEventSend("config", msg)
if err != nil {
return err
}
}
// Device changes
for k, m := range removeDevices {
msg := map[string]interface{}{
"action": "removed",
"name": k,
"config": m,
}
err = d.devlxdEventSend("device", msg)
if err != nil {
return err
}
}
for k, m := range updateDevices {
msg := map[string]interface{}{
"action": "updated",
"name": k,
"config": m,
}
err = d.devlxdEventSend("device", msg)
if err != nil {
return err
}
}
for k, m := range addDevices {
msg := map[string]interface{}{
"action": "added",
"name": k,
"config": m,
}
err = d.devlxdEventSend("device", msg)
if err != nil {
return err
}
}
}
// Success, update the closure to mark that the changes should be kept.
undoChanges = false
if userRequested {
d.lifecycle("updated", nil)
}
return nil
}
// updateDevices applies a set of device changes to the instance: removals
// first (in reverse of the order they were added), then additions (in
// path-sorted order so parent mounts come first), then config updates to
// existing devices. When instanceRunning is true the devices are also
// stopped/started live. Non-user-requested updates (e.g. snapshot restore)
// tolerate devices that fail to add.
func (d *lxc) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices, instanceRunning bool, userRequested bool) error {
	// Tear down removed devices in reverse order to how they were added.
	for _, entry := range removeDevices.Reversed() {
		if instanceRunning {
			stopErr := d.deviceStop(entry.Name, entry.Config, instanceRunning, "")
			if stopErr == device.ErrUnsupportedDevType {
				// No point in trying to remove the device below.
				continue
			}

			if stopErr != nil {
				return errors.Wrapf(stopErr, "Failed to stop device %q", entry.Name)
			}
		}

		removeErr := d.deviceRemove(entry.Name, entry.Config, instanceRunning)
		if removeErr != nil && removeErr != device.ErrUnsupportedDevType {
			return errors.Wrapf(removeErr, "Failed to remove device %q", entry.Name)
		}

		// Unless the same device is about to be re-added with compatible
		// config, drop its volatile keys (it's an actual removal or a device
		// type change).
		resetErr := d.deviceResetVolatile(entry.Name, entry.Config, addDevices[entry.Name])
		if resetErr != nil {
			return errors.Wrapf(resetErr, "Failed to reset volatile data for device %q", entry.Name)
		}
	}

	// Add devices in sorted order; this ensures device mounts are added in
	// path order.
	for _, entry := range addDevices.Sorted() {
		addErr := d.deviceAdd(entry.Name, entry.Config, instanceRunning)
		switch {
		case addErr == device.ErrUnsupportedDevType:
			// No point in trying to start the device below.
			continue
		case addErr != nil && userRequested:
			return errors.Wrapf(addErr, "Failed to add device %q", entry.Name)
		case addErr != nil:
			// If the update is non-user requested (i.e. from a snapshot
			// restore), there's nothing we can do to fix the config and we
			// don't want to prevent the restore, so log and allow.
			d.logger.Error("Failed to add device, skipping as non-user requested", log.Ctx{"device": entry.Name, "err": addErr})
			continue
		}

		if instanceRunning {
			_, startErr := d.deviceStart(entry.Name, entry.Config, instanceRunning)
			if startErr != nil && startErr != device.ErrUnsupportedDevType {
				return errors.Wrapf(startErr, "Failed to start device %q", entry.Name)
			}
		}
	}

	// Apply config changes to devices present in both old and new sets.
	for _, entry := range updateDevices.Sorted() {
		updateErr := d.deviceUpdate(entry.Name, entry.Config, oldExpandedDevices, instanceRunning)
		if updateErr != nil && updateErr != device.ErrUnsupportedDevType {
			return errors.Wrapf(updateErr, "Failed to update device %q", entry.Name)
		}
	}

	return nil
}
// Export backs up the instance.
// It streams a tarball of the instance (metadata.yaml, rootfs and any
// templates) to w. If properties is non-nil it replaces the properties stored
// in the exported metadata.yaml. The instance must not be running. Returns
// the image metadata embedded in the tarball.
func (d *lxc) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
	ctxMap := log.Ctx{
		"created":   d.creationDate,
		"ephemeral": d.ephemeral,
		"used":      d.lastUsedDate}

	meta := api.ImageMetadata{}

	if d.IsRunning() {
		return meta, fmt.Errorf("Cannot export a running instance as an image")
	}

	d.logger.Info("Exporting instance", ctxMap)

	// Start the storage.
	_, err := d.mount()
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}
	defer d.unmount()

	// Get IDMap to unshift container as the tarball is created.
	idmap, err := d.DiskIdmap()
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}

	// Create the tarball.
	tarWriter := instancewriter.NewInstanceTarWriter(w, idmap)

	// Keep track of the first path we saw for each path with nlink>1.
	cDir := d.Path()

	// Path inside the tar image is the pathname starting after cDir.
	offset := len(cDir) + 1

	// writeToTar is the filepath.Walk callback that adds each file to the
	// tarball, stripping the instance path prefix.
	writeToTar := func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		err = tarWriter.WriteFile(path[offset:], path, fi, false)
		if err != nil {
			d.logger.Debug("Error tarring up", log.Ctx{"path": path, "err": err})
			return err
		}
		return nil
	}

	// Look for metadata.yaml.
	fnam := filepath.Join(cDir, "metadata.yaml")
	if !shared.PathExists(fnam) {
		// Generate a new metadata.yaml.
		tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
		defer os.RemoveAll(tempDir)

		// Get the instance's architecture.
		var arch string
		if d.IsSnapshot() {
			// Snapshots take the architecture of their parent instance.
			parentName, _, _ := shared.InstanceGetParentAndSnapshotName(d.name)
			parent, err := instance.LoadByProjectAndName(d.state, d.project, parentName)
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}

			arch, _ = osarch.ArchitectureName(parent.Architecture())
		} else {
			arch, _ = osarch.ArchitectureName(d.architecture)
		}

		if arch == "" {
			// Fall back to the server's primary architecture.
			arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
			if err != nil {
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}
		}

		// Fill in the metadata.
		meta.Architecture = arch
		meta.CreationDate = time.Now().UTC().Unix()
		meta.Properties = properties

		data, err := yaml.Marshal(&meta)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		// Write the actual file.
		fnam = filepath.Join(tempDir, "metadata.yaml")
		err = ioutil.WriteFile(fnam, data, 0644)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		fi, err := os.Lstat(fnam)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		// The generated file lives in tempDir, so compute its own prefix
		// offset rather than reusing the instance path offset.
		tmpOffset := len(path.Dir(fnam)) + 1
		if err := tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false); err != nil {
			tarWriter.Close()
			d.logger.Debug("Error writing to tarfile", log.Ctx{"err": err})
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
	} else {
		// Parse the metadata.
		content, err := ioutil.ReadFile(fnam)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		err = yaml.Unmarshal(content, &meta)
		if err != nil {
			tarWriter.Close()
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		if properties != nil {
			// Caller-supplied properties override the stored ones; write an
			// updated metadata.yaml to a temporary directory.
			meta.Properties = properties

			// Generate a new metadata.yaml.
			tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}
			defer os.RemoveAll(tempDir)

			data, err := yaml.Marshal(&meta)
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}

			// Write the actual file.
			fnam = filepath.Join(tempDir, "metadata.yaml")
			err = ioutil.WriteFile(fnam, data, 0644)
			if err != nil {
				tarWriter.Close()
				d.logger.Error("Failed exporting instance", ctxMap)
				return meta, err
			}
		}

		// Include metadata.yaml in the tarball.
		fi, err := os.Lstat(fnam)
		if err != nil {
			tarWriter.Close()
			d.logger.Debug("Error statting during export", log.Ctx{"fileName": fnam})
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}

		if properties != nil {
			// fnam now points at the regenerated copy in the temp dir, so use
			// its own prefix offset.
			tmpOffset := len(path.Dir(fnam)) + 1
			err = tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false)
		} else {
			err = tarWriter.WriteFile(fnam[offset:], fnam, fi, false)
		}
		if err != nil {
			tarWriter.Close()
			d.logger.Debug("Error writing to tarfile", log.Ctx{"err": err})
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
	}

	// Include all the rootfs files.
	fnam = d.RootfsPath()
	err = filepath.Walk(fnam, writeToTar)
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}

	// Include all the templates.
	fnam = d.TemplatesPath()
	if shared.PathExists(fnam) {
		err = filepath.Walk(fnam, writeToTar)
		if err != nil {
			d.logger.Error("Failed exporting instance", ctxMap)
			return meta, err
		}
	}

	err = tarWriter.Close()
	if err != nil {
		d.logger.Error("Failed exporting instance", ctxMap)
		return meta, err
	}

	d.logger.Info("Exported instance", ctxMap)
	return meta, nil
}
// collectCRIULogFile preserves a CRIU log by copying it from the checkpoint
// images directory into the instance's log directory, timestamping the copy
// so successive migration attempts don't overwrite each other.
func collectCRIULogFile(d instance.Instance, imagesDir string, function string, method string) error {
	timestamp := time.Now().Format(time.RFC3339)
	src := filepath.Join(imagesDir, fmt.Sprintf("%s.log", method))
	dst := filepath.Join(d.LogPath(), fmt.Sprintf("%s_%s_%s.log", function, method, timestamp))

	return shared.FileCopy(src, dst)
}
func getCRIULogErrors(imagesDir string, method string) (string, error) {
f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method)))
if err != nil {
return "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
ret := []string{}
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "Error") || strings.Contains(line, "Warn") {
ret = append(ret, scanner.Text())
}
}
return strings.Join(ret, "\n"), nil
}
// Migrate migrates the instance to another node.
// Depending on args.Cmd it performs a CRIU pre-dump, dump, restore or
// feature-check. Restore is special-cased through the "forkmigrate" helper so
// the monitor process is daemonized rather than remaining a child of LXD.
// The CRIU log is always collected afterwards, and on failure its
// error/warning lines are folded into the returned error.
func (d *lxc) Migrate(args *instance.CriuMigrationArgs) error {
	ctxMap := log.Ctx{
		"created":      d.creationDate,
		"ephemeral":    d.ephemeral,
		"used":         d.lastUsedDate,
		"statedir":     args.StateDir,
		"actionscript": args.ActionScript,
		"predumpdir":   args.PreDumpDir,
		"features":     args.Features,
		"stop":         args.Stop}

	// CRIU is required for any of the migration sub-commands.
	_, err := exec.LookPath("criu")
	if err != nil {
		return fmt.Errorf("Unable to perform container live migration. CRIU isn't installed")
	}

	d.logger.Info("Migrating container", ctxMap)

	// Human-readable name for the CRIU operation, also used as the log file
	// basename ("<prettyCmd>.log").
	prettyCmd := ""
	switch args.Cmd {
	case liblxc.MIGRATE_PRE_DUMP:
		prettyCmd = "pre-dump"
	case liblxc.MIGRATE_DUMP:
		prettyCmd = "dump"
	case liblxc.MIGRATE_RESTORE:
		prettyCmd = "restore"
	case liblxc.MIGRATE_FEATURE_CHECK:
		prettyCmd = "feature-check"
	default:
		prettyCmd = "unknown"
		d.logger.Warn("Unknown migrate call", log.Ctx{"cmd": args.Cmd})
	}

	pool, err := d.getStoragePool()
	if err != nil {
		return err
	}

	preservesInodes := pool.Driver().Info().PreservesInodes
	/* This feature was only added in 2.0.1, let's not ask for it
	 * before then or migrations will fail.
	 */
	if !util.RuntimeLiblxcVersionAtLeast(2, 0, 1) {
		preservesInodes = false
	}

	finalStateDir := args.StateDir
	var migrateErr error

	/* For restore, we need an extra fork so that we daemonize monitor
	 * instead of having it be a child of LXD, so let's hijack the command
	 * here and do the extra fork.
	 */
	if args.Cmd == liblxc.MIGRATE_RESTORE {
		// Run the shared start
		_, postStartHooks, err := d.startCommon()
		if err != nil {
			return errors.Wrap(err, "Failed preparing container for start")
		}

		/*
		 * For unprivileged containers we need to shift the
		 * perms on the images images so that they can be
		 * opened by the process after it is in its user
		 * namespace.
		 */
		idmapset, err := d.CurrentIdmap()
		if err != nil {
			return err
		}

		if idmapset != nil {
			storageType, err := d.getStorageType()
			if err != nil {
				return errors.Wrap(err, "Storage type")
			}

			// ZFS and btrfs use driver-specific shifting helpers.
			if storageType == "zfs" {
				err = idmapset.ShiftRootfs(args.StateDir, storageDrivers.ShiftZFSSkipper)
			} else if storageType == "btrfs" {
				err = storageDrivers.ShiftBtrfsRootfs(args.StateDir, idmapset)
			} else {
				err = idmapset.ShiftRootfs(args.StateDir, nil)
			}
			if err != nil {
				return err
			}
		}

		configPath := filepath.Join(d.LogPath(), "lxc.conf")

		if args.DumpDir != "" {
			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
		}

		// Hand off to the forkmigrate helper so the monitor daemonizes.
		_, migrateErr = shared.RunCommand(
			d.state.OS.ExecPath,
			"forkmigrate",
			d.name,
			d.state.OS.LxcPath,
			configPath,
			finalStateDir,
			fmt.Sprintf("%v", preservesInodes))

		if migrateErr == nil {
			// Run any post start hooks.
			err := d.runHooks(postStartHooks)
			if err != nil {
				// Attempt to stop container.
				d.Stop(false)
				return err
			}
		}
	} else if args.Cmd == liblxc.MIGRATE_FEATURE_CHECK {
		err := d.initLXC(true)
		if err != nil {
			return err
		}

		opts := liblxc.MigrateOptions{
			FeaturesToCheck: args.Features,
		}
		migrateErr = d.c.Migrate(args.Cmd, opts)
		if migrateErr != nil {
			d.logger.Info("CRIU feature check failed", ctxMap)
			return migrateErr
		}
		// Feature check has no log to collect; return immediately.
		return nil
	} else {
		// Pre-dump / dump path: drive CRIU through liblxc directly.
		err := d.initLXC(true)
		if err != nil {
			return err
		}

		script := ""
		if args.ActionScript {
			script = filepath.Join(args.StateDir, "action.sh")
		}

		if args.DumpDir != "" {
			finalStateDir = fmt.Sprintf("%s/%s", args.StateDir, args.DumpDir)
		}

		// TODO: make this configurable? Ultimately I think we don't
		// want to do that; what we really want to do is have "modes"
		// of criu operation where one is "make this succeed" and the
		// other is "make this fast". Anyway, for now, let's choose a
		// really big size so it almost always succeeds, even if it is
		// slow.
		ghostLimit := uint64(256 * 1024 * 1024)

		opts := liblxc.MigrateOptions{
			Stop:            args.Stop,
			Directory:       finalStateDir,
			Verbose:         true,
			PreservesInodes: preservesInodes,
			ActionScript:    script,
			GhostLimit:      ghostLimit,
		}
		if args.PreDumpDir != "" {
			opts.PredumpDir = fmt.Sprintf("../%s", args.PreDumpDir)
		}

		if !d.IsRunning() {
			// otherwise the migration will needlessly fail
			args.Stop = false
		}

		migrateErr = d.c.Migrate(args.Cmd, opts)
	}

	// Preserve the CRIU log regardless of the outcome; a collection failure
	// is logged but not fatal.
	collectErr := collectCRIULogFile(d, finalStateDir, args.Function, prettyCmd)
	if collectErr != nil {
		d.logger.Error("Error collecting checkpoint log file", log.Ctx{"err": collectErr})
	}

	if migrateErr != nil {
		// NOTE(review): this local `log` shadows the log package within this
		// block; only getCRIULogErrors' output is meant here.
		log, err2 := getCRIULogErrors(finalStateDir, prettyCmd)
		if err2 == nil {
			d.logger.Info("Failed migrating container", ctxMap)
			migrateErr = fmt.Errorf("%s %s failed\n%s", args.Function, prettyCmd, log)
		}

		return migrateErr
	}

	d.logger.Info("Migrated container", ctxMap)
	return nil
}
// templateApplyNow renders the image templates whose trigger list matches the
// given trigger into the container's rootfs. If there's no metadata.yaml the
// instance has no templates and the call is a no-op.
func (d *lxc) templateApplyNow(trigger instance.TemplateTrigger) error {
	// If there's no metadata, just return
	fname := filepath.Join(d.Path(), "metadata.yaml")
	if !shared.PathExists(fname) {
		return nil
	}

	// Parse the metadata
	content, err := ioutil.ReadFile(fname)
	if err != nil {
		return errors.Wrap(err, "Failed to read metadata")
	}

	metadata := new(api.ImageMetadata)
	err = yaml.Unmarshal(content, &metadata)
	if err != nil {
		return errors.Wrapf(err, "Could not parse %s", fname)
	}

	// Find rootUID and rootGID
	idmapset, err := d.DiskIdmap()
	if err != nil {
		return errors.Wrap(err, "Failed to set ID map")
	}

	rootUID := int64(0)
	rootGID := int64(0)

	// Get the right uid and gid for the container
	if idmapset != nil {
		rootUID, rootGID = idmapset.ShiftIntoNs(0, 0)
	}

	// Figure out the container architecture
	arch, err := osarch.ArchitectureName(d.architecture)
	if err != nil {
		// Fall back to the server's primary architecture.
		arch, err = osarch.ArchitectureName(d.state.OS.Architectures[0])
		if err != nil {
			return errors.Wrap(err, "Failed to detect system architecture")
		}
	}

	// Generate the container metadata, exposed to templates as both
	// "container" and "instance".
	containerMeta := make(map[string]string)
	containerMeta["name"] = d.name
	containerMeta["type"] = "container"
	containerMeta["architecture"] = arch

	if d.ephemeral {
		containerMeta["ephemeral"] = "true"
	} else {
		containerMeta["ephemeral"] = "false"
	}

	if d.IsPrivileged() {
		containerMeta["privileged"] = "true"
	} else {
		containerMeta["privileged"] = "false"
	}

	// Go through the templates. Each is processed in a closure so the
	// deferred file close runs per iteration, not at function return.
	for tplPath, tpl := range metadata.Templates {
		err = func(tplPath string, tpl *api.ImageMetadataTemplate) error {
			var w *os.File

			// Check if the template should be applied now
			found := false
			for _, tplTrigger := range tpl.When {
				if tplTrigger == string(trigger) {
					found = true
					break
				}
			}

			if !found {
				return nil
			}

			// Open the file to template, create if needed
			fullpath := filepath.Join(d.RootfsPath(), strings.TrimLeft(tplPath, "/"))
			if shared.PathExists(fullpath) {
				if tpl.CreateOnly {
					// Target exists and the template is create-only: skip it.
					return nil
				}

				// Open (and truncate) the existing file
				w, err = os.Create(fullpath)
				if err != nil {
					return errors.Wrap(err, "Failed to create template file")
				}
			} else {
				// Create the directories leading to the file
				shared.MkdirAllOwner(path.Dir(fullpath), 0755, int(rootUID), int(rootGID))

				// Create the file itself
				w, err = os.Create(fullpath)
				if err != nil {
					return err
				}

				// Fix ownership and mode
				w.Chown(int(rootUID), int(rootGID))
				w.Chmod(0644)
			}
			defer w.Close()

			// Read the template
			tplString, err := ioutil.ReadFile(filepath.Join(d.TemplatesPath(), tpl.Template))
			if err != nil {
				return errors.Wrap(err, "Failed to read template file")
			}

			// Restrict filesystem access to within the container's rootfs
			tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", d.name, tpl.Template), template.ChrootLoader{Path: d.RootfsPath()})
			tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
			if err != nil {
				return errors.Wrap(err, "Failed to render template")
			}

			// config_get template helper: look up an expanded instance config
			// key, falling back to the supplied default when unset.
			configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
				val, ok := d.expandedConfig[confKey.String()]
				if !ok {
					return confDefault
				}

				return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
			}

			// Render the template
			tplRender.ExecuteWriter(pongo2.Context{"trigger": trigger,
				"path":       tplPath,
				"container":  containerMeta,
				"instance":   containerMeta,
				"config":     d.expandedConfig,
				"devices":    d.expandedDevices,
				"properties": tpl.Properties,
				"config_get": configGet}, w)

			return nil
		}(tplPath, tpl)
		if err != nil {
			return err
		}
	}

	return nil
}
// inheritInitPidFd returns the file descriptor number (always 3) and the open
// pidfd of the instance's init process for passing to fork helpers, or
// (-1, nil) when pidfds are unsupported on this host or unobtainable.
func (d *lxc) inheritInitPidFd() (int, *os.File) {
	if !d.state.OS.PidFds {
		return -1, nil
	}

	f, err := d.InitPidFd()
	if err != nil {
		return -1, nil
	}

	// The helper commands expect the inherited pidfd as descriptor 3.
	return 3, f
}
// FileExists returns whether file exists inside instance.
// The check is delegated to the "forkfile exists" helper so it runs against
// the container's rootfs (and, when running, its namespaces). A nil return
// means the file exists; any other outcome is returned as an error.
func (d *lxc) FileExists(path string) error {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	// Setup container storage if needed
	_, err := d.mount()
	if err != nil {
		return err
	}
	defer d.unmount()

	// Pass the init pidfd to the helper when supported.
	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Check if the file exists in the container
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"exists",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		path,
	)

	// Process forkcheckfile response
	if stderr != "" {
		if strings.HasPrefix(stderr, "error:") {
			// Use errors.New rather than fmt.Errorf with a non-constant format
			// string, so '%' characters in the helper's message aren't mangled.
			return errors.New(strings.TrimPrefix(strings.TrimSuffix(stderr, "\n"), "error: "))
		}

		for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
			d.logger.Debug("forkcheckfile", log.Ctx{"line": line})
		}
	}

	if err != nil {
		return err
	}

	return nil
}
// FilePull gets a file from the instance.
// It invokes the "forkfile pull" helper to copy srcpath inside the instance
// to dstpath on the host, then parses the helper's "key: value" stderr
// protocol to recover the file's uid, gid, mode, type string and (when the
// helper reports them) directory entries. uid/gid are unshifted out of the
// instance's ID map when the instance is not running.
func (d *lxc) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	// Setup container storage if needed
	_, err := d.mount()
	if err != nil {
		return -1, -1, 0, "", nil, err
	}
	defer d.unmount()

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Get the file from the container
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"pull",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		srcpath,
		dstpath,
	)

	uid := int64(-1)
	gid := int64(-1)
	mode := -1
	fileType := "unknown"
	var dirEnts []string
	var errStr string

	// Process forkgetfile response
	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		if line == "" {
			continue
		}

		// Extract errors: "error:" carries the message, the following
		// "errno:" line triggers the return.
		if strings.HasPrefix(line, "error: ") {
			errStr = strings.TrimPrefix(line, "error: ")
			continue
		}

		if strings.HasPrefix(line, "errno: ") {
			errno := strings.TrimPrefix(line, "errno: ")
			if errno == "2" {
				// ENOENT: map to the standard error so callers can test for it.
				return -1, -1, 0, "", nil, os.ErrNotExist
			}

			// Use errors.New rather than fmt.Errorf with a non-constant format
			// string, so '%' characters in the helper's message aren't mangled.
			return -1, -1, 0, "", nil, errors.New(errStr)
		}

		// Extract the uid
		if strings.HasPrefix(line, "uid: ") {
			uid, err = strconv.ParseInt(strings.TrimPrefix(line, "uid: "), 10, 64)
			if err != nil {
				return -1, -1, 0, "", nil, err
			}

			continue
		}

		// Extract the gid
		if strings.HasPrefix(line, "gid: ") {
			gid, err = strconv.ParseInt(strings.TrimPrefix(line, "gid: "), 10, 64)
			if err != nil {
				return -1, -1, 0, "", nil, err
			}

			continue
		}

		// Extract the mode
		if strings.HasPrefix(line, "mode: ") {
			mode, err = strconv.Atoi(strings.TrimPrefix(line, "mode: "))
			if err != nil {
				return -1, -1, 0, "", nil, err
			}

			continue
		}

		if strings.HasPrefix(line, "type: ") {
			fileType = strings.TrimPrefix(line, "type: ")
			continue
		}

		// The helper encodes newlines in entry names as NUL bytes; undo that.
		if strings.HasPrefix(line, "entry: ") {
			ent := strings.TrimPrefix(line, "entry: ")
			ent = strings.Replace(ent, "\x00", "\n", -1)
			dirEnts = append(dirEnts, ent)
			continue
		}

		d.logger.Debug("forkgetfile", log.Ctx{"line": line})
	}

	if err != nil {
		return -1, -1, 0, "", nil, err
	}

	// Unmap uid and gid if needed
	if !d.IsRunning() {
		idmapset, err := d.DiskIdmap()
		if err != nil {
			return -1, -1, 0, "", nil, err
		}

		if idmapset != nil {
			uid, gid = idmapset.ShiftFromNs(uid, gid)
		}
	}

	return uid, gid, os.FileMode(mode), fileType, dirEnts, nil
}
// FilePush sends a file into the instance.
// It invokes the "forkfile push" helper to copy srcpath on the host to
// dstpath inside the instance, shifting uid/gid (and the root owner used for
// created parent directories) into the instance's ID map when it isn't
// running. When mode is negative the helper's default applies (0640 for
// files, 0750 for directories). write is passed through to the helper
// unmodified (its write-mode selector).
func (d *lxc) FilePush(fileType string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	var rootUID int64
	var rootGID int64
	var errStr string

	// Map uid and gid if needed
	if !d.IsRunning() {
		idmapset, err := d.DiskIdmap()
		if err != nil {
			return err
		}

		if idmapset != nil {
			uid, gid = idmapset.ShiftIntoNs(uid, gid)
			rootUID, rootGID = idmapset.ShiftIntoNs(0, 0)
		}
	}

	// Setup container storage if needed
	_, err := d.mount()
	if err != nil {
		return err
	}
	defer d.unmount()

	defaultMode := 0640
	if fileType == "directory" {
		defaultMode = 0750
	}

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Push the file to the container
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"push",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		srcpath,
		dstpath,
		fileType,
		fmt.Sprintf("%d", uid),
		fmt.Sprintf("%d", gid),
		fmt.Sprintf("%d", mode),
		fmt.Sprintf("%d", rootUID),
		fmt.Sprintf("%d", rootGID),
		fmt.Sprintf("%d", int(os.FileMode(defaultMode)&os.ModePerm)),
		write,
	)

	// Process forkgetfile response: "error:" lines carry the message, the
	// following "errno:" line triggers the return.
	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		if line == "" {
			continue
		}

		// Extract errors
		if strings.HasPrefix(line, "error: ") {
			errStr = strings.TrimPrefix(line, "error: ")
			continue
		}

		if strings.HasPrefix(line, "errno: ") {
			errno := strings.TrimPrefix(line, "errno: ")
			if errno == "2" {
				// ENOENT: map to the standard error so callers can test for it.
				return os.ErrNotExist
			}

			// Use errors.New rather than fmt.Errorf with a non-constant format
			// string, so '%' characters in the helper's message aren't mangled.
			return errors.New(errStr)
		}
	}

	if err != nil {
		return err
	}

	return nil
}
// FileRemove removes a file inside the instance.
// It invokes the "forkfile remove" helper and translates the helper's stderr
// protocol into Go errors (ENOENT becomes os.ErrNotExist).
func (d *lxc) FileRemove(path string) error {
	// Check for ongoing operations (that may involve shifting).
	operationlock.Get(d.id).Wait()

	var errStr string

	// Setup container storage if needed
	_, err := d.mount()
	if err != nil {
		return err
	}
	defer d.unmount()

	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	// Remove the file from the container
	_, stderr, err := shared.RunCommandSplit(
		nil,
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkfile",
		"remove",
		d.RootfsPath(),
		fmt.Sprintf("%d", d.InitPID()),
		fmt.Sprintf("%d", pidFdNr),
		path,
	)

	// Process forkremovefile response: "error:" lines carry the message, the
	// following "errno:" line triggers the return.
	for _, line := range strings.Split(strings.TrimRight(stderr, "\n"), "\n") {
		if line == "" {
			continue
		}

		// Extract errors
		if strings.HasPrefix(line, "error: ") {
			errStr = strings.TrimPrefix(line, "error: ")
			continue
		}

		if strings.HasPrefix(line, "errno: ") {
			errno := strings.TrimPrefix(line, "errno: ")
			if errno == "2" {
				// ENOENT: map to the standard error so callers can test for it.
				return os.ErrNotExist
			}

			// Use errors.New rather than fmt.Errorf with a non-constant format
			// string, so '%' characters in the helper's message aren't mangled.
			return errors.New(errStr)
		}
	}

	if err != nil {
		return err
	}

	return nil
}
// Console attaches to the instance console.
// It spawns the "forkconsole" helper attached to a fresh PTY pair and returns
// the master end plus a channel the caller can send on to disconnect.
func (d *lxc) Console(protocol string) (*os.File, chan error, error) {
	if protocol != instance.ConsoleTypeConsole {
		return nil, nil, fmt.Errorf("Container instances don't support %q output", protocol)
	}

	chDisconnect := make(chan error, 1)

	args := []string{
		d.state.OS.ExecPath,
		"forkconsole",
		project.Instance(d.Project(), d.Name()),
		d.state.OS.LxcPath,
		filepath.Join(d.LogPath(), "lxc.conf"),
		"tty=0",
		"escape=-1"}

	idmapset, err := d.CurrentIdmap()
	if err != nil {
		return nil, nil, err
	}

	// Own the PTY as the container's root UID/GID (shifted into the
	// namespace) so the attached process can use it.
	var rootUID, rootGID int64
	if idmapset != nil {
		rootUID, rootGID = idmapset.ShiftIntoNs(0, 0)
	}

	ptx, pty, err := shared.OpenPty(rootUID, rootGID)
	if err != nil {
		return nil, nil, err
	}

	cmd := exec.Cmd{}
	cmd.Path = d.state.OS.ExecPath
	cmd.Args = args
	cmd.Stdin = pty
	cmd.Stdout = pty
	cmd.Stderr = pty

	err = cmd.Start()
	if err != nil {
		// Don't leak the PTY pair when the helper can't be spawned.
		ptx.Close()
		pty.Close()
		return nil, nil, err
	}

	go func() {
		// Reap the helper, then tear down the PTY pair. Deliberately ignore
		// Wait's error: assigning it to the enclosing function's err (as the
		// code previously did) happened after Console had already returned.
		_ = cmd.Wait()
		ptx.Close()
		pty.Close()
	}()

	go func() {
		<-chDisconnect
		cmd.Process.Kill()
	}()

	return ptx, chDisconnect, nil
}
// ConsoleLog returns console log.
// It delegates to liblxc's console log facility and converts the raw bytes to
// a string for the API.
func (d *lxc) ConsoleLog(opts liblxc.ConsoleLogOptions) (string, error) {
	buf, logErr := d.c.ConsoleLog(opts)
	if logErr != nil {
		return "", logErr
	}

	return string(buf), nil
}
// Exec executes a command inside the instance.
// It spawns the "forkexec" helper, which attaches to the container and runs
// req.Command with the given environment, working directory and credentials.
// stdin/stdout/stderr are inherited by the child as extra files; the helper
// reports the attached child's PID back over a pipe. Returns a Cmd wrapper
// for the running process.
func (d *lxc) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
	// Prepare the environment
	envSlice := []string{}
	for k, v := range req.Environment {
		envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v))
	}

	// Setup logfile
	logPath := filepath.Join(d.LogPath(), "forkexec.log")
	logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
	if err != nil {
		return nil, err
	}

	// The child gets its own duplicate of the log file descriptor at Start(),
	// so close the parent's handle on return to avoid leaking an fd per exec.
	defer logFile.Close()

	// Prepare the subcommand
	cname := project.Instance(d.Project(), d.Name())
	args := []string{
		d.state.OS.ExecPath,
		"forkexec",
		cname,
		d.state.OS.LxcPath,
		filepath.Join(d.LogPath(), "lxc.conf"),
		req.Cwd,
		fmt.Sprintf("%d", req.User),
		fmt.Sprintf("%d", req.Group),
	}

	args = append(args, "--")
	args = append(args, "env")
	args = append(args, envSlice...)

	args = append(args, "--")
	args = append(args, "cmd")
	args = append(args, req.Command...)

	cmd := exec.Cmd{}
	cmd.Path = d.state.OS.ExecPath
	cmd.Args = args
	cmd.Stdin = nil
	cmd.Stdout = logFile
	cmd.Stderr = logFile

	// Mitigation for CVE-2019-5736: ask liblxc to re-exec itself from a memfd
	// when the container might be able to overwrite the host binary.
	useRexec := false
	if d.expandedConfig["raw.idmap"] != "" {
		err := instance.AllowedUnprivilegedOnlyMap(d.expandedConfig["raw.idmap"])
		if err != nil {
			useRexec = true
		}
	}

	if shared.IsTrue(d.expandedConfig["security.privileged"]) {
		useRexec = true
	}

	if useRexec {
		cmd.Env = append(os.Environ(), "LXC_MEMFD_REXEC=1")
	}

	// Setup communication PIPE used by the helper to report the attached
	// child's PID. Check the error before deferring Close, so we never defer
	// Close on a nil file (the original deferred before the error check).
	rStatus, wStatus, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	defer rStatus.Close()

	cmd.ExtraFiles = []*os.File{stdin, stdout, stderr, wStatus}
	err = cmd.Start()
	wStatus.Close()
	if err != nil {
		return nil, err
	}

	attachedPid := shared.ReadPid(rStatus)
	if attachedPid <= 0 {
		cmd.Wait()
		d.logger.Error("Failed to retrieve PID of executing child process")
		return nil, fmt.Errorf("Failed to retrieve PID of executing child process")
	}

	d.logger.Debug("Retrieved PID of executing child process", log.Ctx{"attachedPid": attachedPid})

	instCmd := &lxcCmd{
		cmd:              &cmd,
		attachedChildPid: int(attachedPid),
	}

	return instCmd, nil
}
// cpuState collects CPU usage from the cpuacct cgroup controller. When the
// controller is unavailable a zero struct is returned; a read failure is
// signalled with Usage = -1.
func (d *lxc) cpuState() api.InstanceStateCPU {
	var state api.InstanceStateCPU

	cg, err := d.cgroup(nil)
	if err != nil {
		return state
	}

	// Nothing to report without the cpuacct controller.
	if !d.state.OS.CGInfo.Supports(cgroup.CPUAcct, cg) {
		return state
	}

	// CPU usage in seconds
	usage, err := cg.GetCPUAcctUsage()
	if err != nil {
		state.Usage = -1
		return state
	}

	state.Usage = usage
	return state
}
// diskState reports usage for the instance's root disk and any attached
// custom volumes, keyed by device name. Devices whose usage can't be
// determined are logged and skipped.
func (d *lxc) diskState() map[string]api.InstanceStateDisk {
	state := map[string]api.InstanceStateDisk{}

	for _, entry := range d.expandedDevices.Sorted() {
		conf := entry.Config
		if conf["type"] != "disk" {
			continue
		}

		var usage int64
		switch {
		case conf["path"] == "/":
			// Root disk: ask the instance's own storage pool.
			pool, err := storagePools.GetPoolByInstance(d.state, d)
			if err != nil {
				d.logger.Error("Error loading storage pool", log.Ctx{"err": err})
				continue
			}

			usage, err = pool.GetInstanceUsage(d)
			if err != nil {
				if err != storageDrivers.ErrNotSupported {
					d.logger.Error("Error getting disk usage", log.Ctx{"err": err})
				}
				continue
			}
		case conf["pool"] != "":
			// Custom volume attached from a storage pool.
			pool, err := storagePools.GetPoolByName(d.state, conf["pool"])
			if err != nil {
				d.logger.Error("Error loading storage pool", log.Ctx{"poolName": conf["pool"], "err": err})
				continue
			}

			usage, err = pool.GetCustomVolumeUsage(d.Project(), conf["source"])
			if err != nil {
				if err != storageDrivers.ErrNotSupported {
					d.logger.Error("Error getting volume usage", log.Ctx{"volume": conf["source"], "err": err})
				}
				continue
			}
		default:
			continue
		}

		state[entry.Name] = api.InstanceStateDisk{Usage: usage}
	}

	return state
}
// memoryState collects memory and swap usage plus peaks from the memory
// cgroup controller. Missing controllers or read failures simply leave the
// corresponding fields at zero.
func (d *lxc) memoryState() api.InstanceStateMemory {
	var state api.InstanceStateMemory

	cg, err := d.cgroup(nil)
	if err != nil {
		return state
	}

	if !d.state.OS.CGInfo.Supports(cgroup.Memory, cg) {
		return state
	}

	// Current memory usage in bytes.
	if usage, err := cg.GetMemoryUsage(); err == nil {
		state.Usage = usage
	}

	// Peak memory usage in bytes, where tracked.
	if d.state.OS.CGInfo.Supports(cgroup.MemoryMaxUsage, cg) {
		if peak, err := cg.GetMemoryMaxUsage(); err == nil {
			state.UsagePeak = peak
		}
	}

	if d.state.OS.CGInfo.Supports(cgroup.MemorySwapUsage, cg) {
		// Swap usage in bytes (only reported alongside memory usage).
		if state.Usage > 0 {
			if swap, err := cg.GetMemorySwapUsage(); err == nil {
				state.SwapUsage = swap
			}
		}

		// Swap peak in bytes.
		if state.UsagePeak > 0 {
			if swapPeak, err := cg.GetMemorySwapMaxUsage(); err == nil {
				state.SwapUsagePeak = swapPeak
			}
		}
	}

	return state
}
// networkState returns the state of every network interface inside the
// container, keyed by interface name. It prefers querying the container's
// netns directly via netlink and falls back to spawning "lxd forknet info";
// on any failure it returns whatever partial result it has (possibly empty).
func (d *lxc) networkState() map[string]api.InstanceStateNetwork {
	result := map[string]api.InstanceStateNetwork{}

	// A container without a running init has no interfaces to report.
	pid := d.InitPID()
	if pid < 1 {
		return result
	}

	couldUseNetnsGetifaddrs := d.state.OS.NetnsGetifaddrs
	if couldUseNetnsGetifaddrs {
		// Fast path: read the interface state over netlink.
		nw, err := netutils.NetnsGetifaddrs(int32(pid))
		if err != nil {
			couldUseNetnsGetifaddrs = false
			d.logger.Error("Failed to retrieve network information via netlink", log.Ctx{"pid": pid})
		} else {
			result = nw
		}
	}

	if !couldUseNetnsGetifaddrs {
		// Fallback: "lxd forknet info" enters the container's namespaces
		// and prints the interface state as JSON on stdout.
		pidFdNr, pidFd := d.inheritInitPidFd()
		if pidFdNr >= 0 {
			defer pidFd.Close()
		}

		// Get the network state from the container
		out, _, err := shared.RunCommandSplit(
			nil,
			[]*os.File{pidFd},
			d.state.OS.ExecPath,
			"forknet",
			"info",
			"--",
			fmt.Sprintf("%d", pid),
			fmt.Sprintf("%d", pidFdNr))

		// Process forkgetnet response
		if err != nil {
			d.logger.Error("Error calling 'lxd forknet", log.Ctx{"err": err, "pid": pid})
			return result
		}

		// If we can use netns_getifaddrs() but it failed and the setns() +
		// netns_getifaddrs() succeeded we should just always fallback to the
		// setns() + netns_getifaddrs() style retrieval.
		d.state.OS.NetnsGetifaddrs = false

		nw := map[string]api.InstanceStateNetwork{}
		err = json.Unmarshal([]byte(out), &nw)
		if err != nil {
			d.logger.Error("Failure to read forknet json", log.Ctx{"err": err})
			return result
		}
		result = nw
	}

	// Get host_name from volatile data if not set already.
	for name, dev := range result {
		if dev.HostName == "" {
			dev.HostName = d.localConfig[fmt.Sprintf("volatile.%s.host_name", name)]
			result[name] = dev
		}
	}

	return result
}
// processesState returns the number of processes in the container: via the
// pids cgroup controller when available, otherwise by walking the process
// tree under /proc starting from init. Returns 0 when stopped, -1 on a
// cgroup read failure.
func (d *lxc) processesState() int64 {
	// Return 0 if not running
	pid := d.InitPID()
	if pid == -1 {
		return 0
	}

	cg, err := d.cgroup(nil)
	if err != nil {
		return 0
	}

	// Preferred: ask the pids controller directly.
	if d.state.OS.CGInfo.Supports(cgroup.Pids, cg) {
		count, err := cg.GetProcessesUsage()
		if err != nil {
			return -1
		}

		return count
	}

	// Fallback: breadth-first walk of /proc/<pid>/task/<pid>/children,
	// appending newly discovered children so every PID gets visited.
	pids := []int64{int64(pid)}
	for i := 0; i < len(pids); i++ {
		fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i])
		fcont, err := ioutil.ReadFile(fname)
		if err != nil {
			// The process terminated while we were iterating.
			continue
		}

		for _, field := range strings.Split(string(fcont), " ") {
			child, err := strconv.ParseInt(field, 10, 64)
			if err == nil {
				pids = append(pids, child)
			}
		}
	}

	return int64(len(pids))
}
// getStoragePool returns the instance's storage pool handle, caching it on
// the struct so repeated calls avoid a database lookup.
func (d *lxc) getStoragePool() (storagePools.Pool, error) {
	if d.storagePool == nil {
		pool, err := storagePools.GetPoolByInstance(d.state, d)
		if err != nil {
			return nil, err
		}

		d.storagePool = pool
	}

	return d.storagePool, nil
}
// getStorageType returns the driver name of the instance's storage pool.
func (d *lxc) getStorageType() (string, error) {
	pool, err := d.getStoragePool()
	if err != nil {
		return "", err
	}

	return pool.Driver().Info().Name, nil
}
// mount ensures the instance's rootfs volume is mounted, using the snapshot
// or instance entry point of the pool as appropriate.
func (d *lxc) mount() (*storagePools.MountInfo, error) {
	pool, err := d.getStoragePool()
	if err != nil {
		return nil, err
	}

	if d.IsSnapshot() {
		info, err := pool.MountInstanceSnapshot(d, nil)
		if err != nil {
			return nil, err
		}

		return info, nil
	}

	info, err := pool.MountInstance(d, nil)
	if err != nil {
		return nil, err
	}

	return info, nil
}
// unmount the instance's rootfs volume if needed, reporting whether an
// unmount actually took place.
func (d *lxc) unmount() (bool, error) {
	pool, err := d.getStoragePool()
	if err != nil {
		return false, err
	}

	if d.IsSnapshot() {
		ourUnmount, err := pool.UnmountInstanceSnapshot(d, nil)
		if err != nil {
			return false, err
		}

		return ourUnmount, nil
	}

	ourUnmount, err := pool.UnmountInstance(d, nil)
	if err != nil {
		return false, err
	}

	return ourUnmount, nil
}
// insertMountLXD inserts a mount into a LXD container.
// This function is used for the seccomp notifier and so cannot call any
// functions that would cause LXC to talk to the container's monitor. Otherwise
// we'll have a deadlock (with a timeout but still). The InitPID() call here is
// the exception since the seccomp notifier will make sure to always pass a
// valid PID.
//
// The mount is first created on the host under the instance's shmounts path
// (visible inside the container as /dev/.lxd-mounts) and then moved to the
// target path inside the container via forkmount.
func (d *lxc) insertMountLXD(source, target, fstype string, flags int, mntnsPID int, shiftfs bool) error {
	pid := mntnsPID
	if pid <= 0 {
		// Get the init PID
		pid = d.InitPID()
		if pid == -1 {
			// Container isn't running
			return fmt.Errorf("Can't insert mount into stopped container")
		}
	}

	// Create the temporary mount target. A directory source needs a
	// directory mountpoint; anything else gets an empty file.
	var tmpMount string
	var err error
	if shared.IsDir(source) {
		tmpMount, err = ioutil.TempDir(d.ShmountsPath(), "lxdmount_")
		if err != nil {
			return fmt.Errorf("Failed to create shmounts path: %s", err)
		}
	} else {
		f, err := ioutil.TempFile(d.ShmountsPath(), "lxdmount_")
		if err != nil {
			return fmt.Errorf("Failed to create shmounts path: %s", err)
		}

		tmpMount = f.Name()
		f.Close()
	}
	defer os.Remove(tmpMount)

	// Mount the filesystem
	err = unix.Mount(source, tmpMount, fstype, uintptr(flags), "")
	if err != nil {
		return fmt.Errorf("Failed to setup temporary mount: %s", err)
	}
	// The temporary host-side mount is always detached on return; by then
	// it has (on success) already been moved into the container.
	defer unix.Unmount(tmpMount, unix.MNT_DETACH)

	// Setup host side shiftfs as needed
	if shiftfs {
		err = unix.Mount(tmpMount, tmpMount, "shiftfs", 0, "mark,passthrough=3")
		if err != nil {
			return fmt.Errorf("Failed to setup host side shiftfs mount: %s", err)
		}
		defer unix.Unmount(tmpMount, unix.MNT_DETACH)
	}

	// Move the mount inside the container
	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))
	pidStr := fmt.Sprintf("%d", pid)

	// Use a pidfd when available so the target process can't be confused
	// with a recycled PID.
	pidFdNr, pidFd := seccomp.MakePidFd(pid, d.state)
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	_, err = shared.RunCommandInheritFds(
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkmount",
		"lxd-mount",
		"--",
		pidStr,
		fmt.Sprintf("%d", pidFdNr),
		mntsrc,
		target,
		fmt.Sprintf("%v", shiftfs))
	if err != nil {
		return err
	}

	return nil
}
// insertMountLXC injects a mount into the container using LXC's own mount
// injection support (forkmount lxc-mount).
func (d *lxc) insertMountLXC(source, target, fstype string, flags int) error {
	cname := project.Instance(d.Project(), d.Name())
	configPath := filepath.Join(d.LogPath(), "lxc.conf")

	// forkmount expects an explicit fstype and an absolute target path.
	if fstype == "" {
		fstype = "none"
	}
	if !strings.HasPrefix(target, "/") {
		target = "/" + target
	}

	_, err := shared.RunCommand(
		d.state.OS.ExecPath,
		"forkmount",
		"lxc-mount",
		"--",
		cname,
		d.state.OS.LxcPath,
		configPath,
		source,
		target,
		fstype,
		fmt.Sprintf("%d", flags))
	if err != nil {
		return err
	}

	return nil
}
// insertMount injects a mount into the container, preferring LXC's native
// mount injection when available; shiftfs mounts always use the LXD path.
func (d *lxc) insertMount(source, target, fstype string, flags int, shiftfs bool) error {
	if !shiftfs && d.state.OS.LXCFeatures["mount_injection_file"] {
		return d.insertMountLXC(source, target, fstype, flags)
	}

	return d.insertMountLXD(source, target, fstype, flags, -1, shiftfs)
}
// removeMount removes a mount from inside the container, via LXC's native
// umount injection when supported, otherwise via forkmount entering the
// container's namespaces.
func (d *lxc) removeMount(mount string) error {
	// Get the init PID
	pid := d.InitPID()
	if pid == -1 {
		// Container isn't running
		return fmt.Errorf("Can't remove mount from stopped container")
	}

	if d.state.OS.LXCFeatures["mount_injection_file"] {
		configPath := filepath.Join(d.LogPath(), "lxc.conf")
		cname := project.Instance(d.Project(), d.Name())

		// forkmount expects an absolute path inside the container.
		if !strings.HasPrefix(mount, "/") {
			mount = "/" + mount
		}

		_, err := shared.RunCommand(
			d.state.OS.ExecPath,
			"forkmount",
			"lxc-umount",
			"--",
			cname,
			d.state.OS.LxcPath,
			configPath,
			mount)
		if err != nil {
			return err
		}

		return nil
	}

	// Remove the mount from the container
	pidFdNr, pidFd := d.inheritInitPidFd()
	if pidFdNr >= 0 {
		defer pidFd.Close()
	}

	_, err := shared.RunCommandInheritFds(
		[]*os.File{pidFd},
		d.state.OS.ExecPath,
		"forkmount",
		"lxd-umount",
		"--",
		fmt.Sprintf("%d", pid),
		fmt.Sprintf("%d", pidFdNr),
		mount)
	if err != nil {
		return err
	}

	return nil
}
// InsertSeccompUnixDevice inserts a seccomp device.
//
// The device path in m is resolved relative to the requesting task's root
// (and cwd for relative paths), the device node is created on the host with
// the container's shifted uid/gid, and then bind-mounted into the container
// via insertMountLXD.
func (d *lxc) InsertSeccompUnixDevice(prefix string, m deviceConfig.Device, pid int) error {
	if pid < 0 {
		return fmt.Errorf("Invalid request PID specified")
	}

	rootLink := fmt.Sprintf("/proc/%d/root", pid)
	rootPath, err := os.Readlink(rootLink)
	if err != nil {
		return err
	}

	uid, gid, _, _, err := seccomp.TaskIDs(pid)
	if err != nil {
		return err
	}

	idmapset, err := d.CurrentIdmap()
	if err != nil {
		return err
	}

	// Translate the task's host uid/gid into the container's namespace.
	nsuid, nsgid := idmapset.ShiftFromNs(uid, gid)
	m["uid"] = fmt.Sprintf("%d", nsuid)
	m["gid"] = fmt.Sprintf("%d", nsgid)

	if !path.IsAbs(m["path"]) {
		// Relative device paths are resolved against the task's cwd.
		cwdLink := fmt.Sprintf("/proc/%d/cwd", pid)
		prefixPath, err := os.Readlink(cwdLink)
		if err != nil {
			return err
		}

		prefixPath = strings.TrimPrefix(prefixPath, rootPath)
		m["path"] = filepath.Join(rootPath, prefixPath, m["path"])
	} else {
		m["path"] = filepath.Join(rootPath, m["path"])
	}

	// Reuse the idmap fetched above rather than calling CurrentIdmap()
	// a second time (the previous code did the lookup twice).
	dev, err := device.UnixDeviceCreate(d.state, idmapset, d.DevicesPath(), prefix, m, true)
	if err != nil {
		return fmt.Errorf("Failed to setup device: %s", err)
	}
	devPath := dev.HostPath
	tgtPath := dev.RelativePath

	// Bind-mount it into the container
	defer os.Remove(devPath)
	return d.insertMountLXD(devPath, tgtPath, "none", unix.MS_BIND, pid, false)
}
// removeUnixDevices deletes all unix device entries from the instance's
// devices directory. Individual removal failures are logged, not fatal.
func (d *lxc) removeUnixDevices() error {
	// Nothing to do if the devices directory was never created.
	if !shared.PathExists(d.DevicesPath()) {
		return nil
	}

	entries, err := ioutil.ReadDir(d.DevicesPath())
	if err != nil {
		return err
	}

	for _, entry := range entries {
		name := entry.Name()

		// Only handle unix-device entries.
		isUnix := strings.HasPrefix(name, "forkmknod.unix.") ||
			strings.HasPrefix(name, "unix.") ||
			strings.HasPrefix(name, "infiniband.unix.")
		if !isUnix {
			continue
		}

		devicePath := filepath.Join(d.DevicesPath(), name)
		if err := os.Remove(devicePath); err != nil {
			d.logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
		}
	}

	return nil
}
// FillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
// generated name and hwaddr properties if these are missing from the device.
// Generated values are persisted as volatile.<device>.{name,hwaddr} config
// keys so they stay stable across restarts. The supplied map is not modified;
// an enriched clone is returned.
func (d *lxc) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
	var err error
	newDevice := m.Clone()

	// Function to try and guess an available name
	nextInterfaceName := func() (string, error) {
		devNames := []string{}

		// Include all static interface names
		for _, dev := range d.expandedDevices.Sorted() {
			if dev.Config["name"] != "" && !shared.StringInSlice(dev.Config["name"], devNames) {
				devNames = append(devNames, dev.Config["name"])
			}
		}

		// Include all currently allocated interface names
		// (the volatile.<device>.name keys in the expanded config).
		for k, v := range d.expandedConfig {
			if !strings.HasPrefix(k, shared.ConfigVolatilePrefix) {
				continue
			}

			fields := strings.SplitN(k, ".", 3)
			if len(fields) != 3 {
				continue
			}

			if fields[2] != "name" || shared.StringInSlice(v, devNames) {
				continue
			}

			devNames = append(devNames, v)
		}

		// Attempt to include all existing interfaces
		// (best-effort: failures to query liblxc are silently ignored).
		cname := project.Instance(d.Project(), d.Name())
		cc, err := liblxc.NewContainer(cname, d.state.OS.LxcPath)
		if err == nil {
			defer cc.Release()

			interfaces, err := cc.Interfaces()
			if err == nil {
				for _, name := range interfaces {
					if shared.StringInSlice(name, devNames) {
						continue
					}

					devNames = append(devNames, name)
				}
			}
		}

		// Pick the first ethN (or ibN for infiniband) not already taken.
		i := 0
		name := ""
		for {
			if m["type"] == "infiniband" {
				name = fmt.Sprintf("ib%d", i)
			} else {
				name = fmt.Sprintf("eth%d", i)
			}

			// Find a free device name
			if !shared.StringInSlice(name, devNames) {
				return name, nil
			}

			i++
		}
	}

	nicType, err := nictype.NICType(d.state, d.Project(), m)
	if err != nil {
		return nil, err
	}

	// Fill in the MAC address.
	// physical/ipvlan/sriov NICs keep their real hardware address, so they
	// are excluded from MAC generation.
	if !shared.StringInSlice(nicType, []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
		volatileHwaddr := d.localConfig[configKey]
		if volatileHwaddr == "" {
			// Generate a new MAC address.
			volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
			if err != nil || volatileHwaddr == "" {
				return nil, errors.Wrapf(err, "Failed generating %q", configKey)
			}

			// Update the database and update volatileHwaddr with stored value.
			volatileHwaddr, err = d.insertConfigkey(configKey, volatileHwaddr)
			if err != nil {
				return nil, errors.Wrapf(err, "Failed storing generated config key %q", configKey)
			}

			// Set stored value into current instance config.
			d.localConfig[configKey] = volatileHwaddr
			d.expandedConfig[configKey] = volatileHwaddr
		}

		if volatileHwaddr == "" {
			return nil, fmt.Errorf("Failed getting %q", configKey)
		}

		newDevice["hwaddr"] = volatileHwaddr
	}

	// Fill in the interface name.
	if m["name"] == "" {
		configKey := fmt.Sprintf("volatile.%s.name", name)
		volatileName := d.localConfig[configKey]
		if volatileName == "" {
			// Generate a new interface name.
			volatileName, err = nextInterfaceName()
			if err != nil || volatileName == "" {
				return nil, errors.Wrapf(err, "Failed generating %q", configKey)
			}

			// Update the database and update volatileName with stored value.
			volatileName, err = d.insertConfigkey(configKey, volatileName)
			if err != nil {
				return nil, errors.Wrapf(err, "Failed storing generated config key %q", configKey)
			}

			// Set stored value into current instance config.
			d.localConfig[configKey] = volatileName
			d.expandedConfig[configKey] = volatileName
		}

		if volatileName == "" {
			return nil, fmt.Errorf("Failed getting %q", configKey)
		}

		newDevice["name"] = volatileName
	}

	return newDevice, nil
}
// removeDiskDevices unmounts and deletes all disk device entries from the
// instance's devices directory. Individual failures are logged, not fatal.
func (d *lxc) removeDiskDevices() error {
	// Nothing to do if the devices directory was never created.
	if !shared.PathExists(d.DevicesPath()) {
		return nil
	}

	entries, err := ioutil.ReadDir(d.DevicesPath())
	if err != nil {
		return err
	}

	for _, entry := range entries {
		// Only handle disk entries.
		if !strings.HasPrefix(entry.Name(), "disk.") {
			continue
		}

		diskPath := filepath.Join(d.DevicesPath(), entry.Name())

		// Best-effort unmount of the host side before removal.
		_ = unix.Unmount(diskPath, unix.MNT_DETACH)

		if err := os.Remove(diskPath); err != nil {
			d.logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
		}
	}

	return nil
}
// Network I/O limits

// setNetworkPriority applies limits.network.priority to every host network
// interface through the net_prio cgroup controller. It succeeds if at least
// one interface accepted the priority.
func (d *lxc) setNetworkPriority() error {
	// Load the go-lxc struct.
	if err := d.initLXC(false); err != nil {
		return err
	}

	// Load the cgroup struct.
	cg, err := d.cgroup(nil)
	if err != nil {
		return err
	}

	// Priorities can only be applied to a running container.
	if !d.IsRunning() {
		return fmt.Errorf("Can't set network priority on stopped container")
	}

	// Nothing to do without the net_prio controller.
	if !d.state.OS.CGInfo.Supports(cgroup.NetPrio, cg) {
		return nil
	}

	// Parse the configured priority, defaulting to 0.
	prio := d.expandedConfig["limits.network.priority"]
	if prio == "" {
		prio = "0"
	}

	prioInt, err := strconv.Atoi(prio)
	if err != nil {
		return err
	}

	netifs, err := net.Interfaces()
	if err != nil {
		return err
	}

	// Apply to every interface, remembering the last failure.
	success := false
	var lastError error
	for _, netif := range netifs {
		err := cg.SetNetIfPrio(fmt.Sprintf("%s %d", netif.Name, prioInt))
		if err != nil {
			lastError = err
			continue
		}

		success = true
	}

	if !success {
		return fmt.Errorf("Failed to set network device priority: %s", lastError)
	}

	return nil
}
// IsFrozen reports whether the instance is currently in the FROZEN state.
func (d *lxc) IsFrozen() bool {
	return d.State() == "FROZEN"
}
// IsNesting reports whether security.nesting is enabled for the instance.
func (d *lxc) IsNesting() bool {
	return shared.IsTrue(d.expandedConfig["security.nesting"])
}
// isCurrentlyPrivileged reports whether the running container is effectively
// privileged (no idmap in use). When the container isn't running or its
// idmap can't be read, it falls back to the configured setting.
func (d *lxc) isCurrentlyPrivileged() bool {
	if !d.IsRunning() {
		return d.IsPrivileged()
	}

	idmap, err := d.CurrentIdmap()
	if err != nil {
		return d.IsPrivileged()
	}

	// A nil idmap means no ID shifting, i.e. privileged.
	return idmap == nil
}
// IsPrivileged reports whether security.privileged is enabled.
func (d *lxc) IsPrivileged() bool {
	return shared.IsTrue(d.expandedConfig["security.privileged"])
}
// IsRunning reports whether the instance is neither stopped nor broken.
func (d *lxc) IsRunning() bool {
	current := d.State()
	return current != "BROKEN" && current != "STOPPED"
}
// InitPID returns the PID of the container's init process, or -1 when it
// can't be determined.
func (d *lxc) InitPID() int {
	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		return -1
	}

	return d.c.InitPid()
}
// InitPidFd returns a pidfd referring to the container's init process.
func (d *lxc) InitPidFd() (*os.File, error) {
	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		return nil, err
	}

	return d.c.InitPidFd()
}
// DevptsFd returns a dirfd for the container's devpts mount.
func (d *lxc) DevptsFd() (*os.File, error) {
	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		return nil, err
	}

	// Requires a liblxc with the devpts_fd API extension.
	if !liblxc.HasApiExtension("devpts_fd") {
		return nil, fmt.Errorf("Missing devpts_fd extension")
	}

	return d.c.DevptsFd()
}
// CurrentIdmap returns the idmap currently in use, falling back to the
// on-disk idmap when no current one is recorded.
func (d *lxc) CurrentIdmap() (*idmap.IdmapSet, error) {
	raw, ok := d.LocalConfig()["volatile.idmap.current"]
	if !ok {
		return d.DiskIdmap()
	}

	return idmap.JSONUnmarshal(raw)
}
// DiskIdmap returns the idmap recorded for the instance's on-disk state, or
// nil when none is recorded.
func (d *lxc) DiskIdmap() (*idmap.IdmapSet, error) {
	raw, ok := d.LocalConfig()["volatile.last_state.idmap"]
	if !ok {
		return nil, nil
	}

	return idmap.JSONUnmarshal(raw)
}
// NextIdmap returns the idmap to use on next start, falling back to the
// current idmap when none is queued.
func (d *lxc) NextIdmap() (*idmap.IdmapSet, error) {
	raw, ok := d.LocalConfig()["volatile.idmap.next"]
	if !ok {
		return d.CurrentIdmap()
	}

	return idmap.JSONUnmarshal(raw)
}
// State returns the instance's LXC state name, or the API error state when
// the state can't be queried.
func (d *lxc) State() string {
	st, err := d.getLxcState()
	if err != nil {
		return api.Error.String()
	}

	return st.String()
}
// LogFilePath returns the path of the container's lxc.log file.
func (d *lxc) LogFilePath() string {
	return filepath.Join(d.LogPath(), "lxc.log")
}
// StoragePool returns the name of the instance's storage pool.
func (d *lxc) StoragePool() (string, error) {
	name, err := d.state.Cluster.GetInstancePool(d.Project(), d.Name())
	if err != nil {
		return "", err
	}

	return name, nil
}
// Internal MAAS handling.

// maasInterfaces builds the MAAS interface description for every NIC in the
// supplied device map that has a MAAS subnet configured.
func (d *lxc) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
	interfaces := []maas.ContainerInterface{}
	for devName, devConf := range devices {
		// Only NICs with at least one MAAS subnet are relevant.
		if devConf["type"] != "nic" {
			continue
		}

		if devConf["maas.subnet.ipv4"] == "" && devConf["maas.subnet.ipv6"] == "" {
			continue
		}

		// Resolve auto-generated name/hwaddr for the device.
		devConf, err := d.FillNetworkDevice(devName, devConf)
		if err != nil {
			return nil, err
		}

		// Collect the configured IPv4/IPv6 subnets.
		subnets := []maas.ContainerInterfaceSubnet{}
		for _, family := range []string{"ipv4", "ipv6"} {
			subnetName := devConf[fmt.Sprintf("maas.subnet.%s", family)]
			if subnetName == "" {
				continue
			}

			subnets = append(subnets, maas.ContainerInterfaceSubnet{
				Name:    subnetName,
				Address: devConf[fmt.Sprintf("%s.address", family)],
			})
		}

		interfaces = append(interfaces, maas.ContainerInterface{
			Name:       devConf["name"],
			MACAddress: devConf["hwaddr"],
			Subnets:    subnets,
		})
	}

	return interfaces, nil
}
// maasUpdate syncs the container's MAAS record with its current NIC
// configuration, taking the previous device set into account so records can
// be deleted when the last MAAS NIC is removed.
func (d *lxc) maasUpdate(oldDevices map[string]map[string]string) error {
	// Skip entirely when MAAS isn't configured.
	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
	if err != nil {
		return err
	}

	if maasURL == "" {
		return nil
	}

	// Collect the new and (when provided) old MAAS-managed interfaces.
	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
	if err != nil {
		return err
	}

	var oldInterfaces []maas.ContainerInterface
	if oldDevices != nil {
		oldInterfaces, err = d.maasInterfaces(oldDevices)
		if err != nil {
			return err
		}
	}

	// Nothing used MAAS before or after this update.
	if len(interfaces) == 0 && len(oldInterfaces) == 0 {
		return nil
	}

	// A MAAS connection is required from here on.
	if d.state.MAAS == nil {
		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
	}

	exists, err := d.state.MAAS.DefinedContainer(d)
	if err != nil {
		return err
	}

	if !exists {
		return d.state.MAAS.CreateContainer(d, interfaces)
	}

	// The last MAAS NIC was removed: drop the record entirely.
	if len(interfaces) == 0 && len(oldInterfaces) > 0 {
		return d.state.MAAS.DeleteContainer(d)
	}

	return d.state.MAAS.UpdateContainer(d, interfaces)
}
// maasRename renames the container's MAAS record, creating it first when it
// doesn't exist yet.
func (d *lxc) maasRename(newName string) error {
	// Skip entirely when MAAS isn't configured.
	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
	if err != nil {
		return err
	}

	if maasURL == "" {
		return nil
	}

	// Skip when no NIC is MAAS-managed.
	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
	if err != nil {
		return err
	}

	if len(interfaces) == 0 {
		return nil
	}

	if d.state.MAAS == nil {
		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
	}

	exists, err := d.state.MAAS.DefinedContainer(d)
	if err != nil {
		return err
	}

	if !exists {
		// Not registered yet: create the record instead of renaming.
		return d.maasUpdate(nil)
	}

	return d.state.MAAS.RenameContainer(d, newName)
}
// maasDelete removes the container's MAAS record when one exists.
func (d *lxc) maasDelete() error {
	// Skip entirely when MAAS isn't configured.
	maasURL, err := cluster.ConfigGetString(d.state.Cluster, "maas.api.url")
	if err != nil {
		return err
	}

	if maasURL == "" {
		return nil
	}

	// Skip when no NIC is MAAS-managed.
	interfaces, err := d.maasInterfaces(d.expandedDevices.CloneNative())
	if err != nil {
		return err
	}

	if len(interfaces) == 0 {
		return nil
	}

	if d.state.MAAS == nil {
		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
	}

	exists, err := d.state.MAAS.DefinedContainer(d)
	if err != nil {
		return err
	}

	if !exists {
		// Already gone, nothing to delete.
		return nil
	}

	return d.state.MAAS.DeleteContainer(d)
}
// CGroup returns a cgroup handle for the container.
func (d *lxc) CGroup() (*cgroup.CGroup, error) {
	// Load the go-lxc struct
	if err := d.initLXC(false); err != nil {
		return nil, err
	}

	return d.cgroup(nil)
}
// cgroup builds a cgroup handle backed either by the supplied container's
// config keys (when cc is non-nil) or by the instance's live cgroup.
func (d *lxc) cgroup(cc *liblxc.Container) (*cgroup.CGroup, error) {
	rw := lxcCgroupReadWriter{}
	if cc == nil {
		rw.cc = d.c
	} else {
		rw.cc = cc
		rw.conf = true
	}

	cg, err := cgroup.New(&rw)
	if err != nil {
		return nil, err
	}

	cg.UnifiedCapable = liblxc.HasApiExtension("cgroup2")
	return cg, nil
}
// lxcCgroupReadWriter adapts a liblxc container to the cgroup read/write
// interface. When conf is true, keys are read/written as lxc.cgroup[2].*
// config items instead of the live cgroup.
type lxcCgroupReadWriter struct {
	cc   *liblxc.Container
	conf bool
}
// Get reads a cgroup key, either from the container config (lxc.cgroup[2].*)
// or from the live cgroup, joining multi-value results with newlines.
func (rw *lxcCgroupReadWriter) Get(version cgroup.Backend, controller string, key string) (string, error) {
	if !rw.conf {
		return strings.Join(rw.cc.CgroupItem(key), "\n"), nil
	}

	lxcKey := fmt.Sprintf("lxc.cgroup.%s", key)
	if version == cgroup.V2 {
		lxcKey = fmt.Sprintf("lxc.cgroup2.%s", key)
	}

	return strings.Join(rw.cc.ConfigItem(lxcKey), "\n"), nil
}
// Set writes a cgroup key, either into the container config
// (lxc.cgroup[2].*) or into the live cgroup.
func (rw *lxcCgroupReadWriter) Set(version cgroup.Backend, controller string, key string, value string) error {
	if !rw.conf {
		return rw.cc.SetCgroupItem(key, value)
	}

	if version == cgroup.V1 {
		return lxcSetConfigItem(rw.cc, fmt.Sprintf("lxc.cgroup.%s", key), value)
	}

	return lxcSetConfigItem(rw.cc, fmt.Sprintf("lxc.cgroup2.%s", key), value)
}
// UpdateBackupFile writes the instance's backup.yaml file to storage.
func (d *lxc) UpdateBackupFile() error {
	pool, err := d.getStoragePool()
	if err != nil {
		return err
	}

	return pool.UpdateInstanceBackupFile(d, nil)
}
// SaveConfigFile generates the LXC config file on disk, removing any partial
// file left behind on failure.
func (d *lxc) SaveConfigFile() error {
	if err := d.initLXC(true); err != nil {
		return errors.Wrapf(err, "Failed to generate LXC config")
	}

	// Write the rendered config next to the container's log.
	configPath := filepath.Join(d.LogPath(), "lxc.conf")
	if err := d.c.SaveConfigFile(configPath); err != nil {
		// Don't leave a stale/partial config behind.
		os.Remove(configPath)
		return errors.Wrapf(err, "Failed to save LXC config to file %q", configPath)
	}

	return nil
}
// Info returns the driver name ("lxc") and the loaded liblxc version.
func (d *lxc) Info() instance.Info {
	return instance.Info{
		Name:    "lxc",
		Version: liblxc.Version(),
	}
}
|
package drivers
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/rsync"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
)
// genericVFSGetResources is a generic GetResources implementation for
// VFS-only drivers, deriving pool usage from statvfs on the mount path.
func genericVFSGetResources(d Driver) (*api.ResourcesStoragePool, error) {
	// Get the VFS information
	st, err := shared.Statvfs(GetPoolMountPath(d.Name()))
	if err != nil {
		return nil, err
	}

	// Fill in the struct
	res := api.ResourcesStoragePool{}
	blockSize := uint64(st.Bsize)
	res.Space.Total = st.Blocks * blockSize
	res.Space.Used = (st.Blocks - st.Bfree) * blockSize

	// Some filesystems don't report inodes since they allocate them
	// dynamically e.g. btrfs.
	if st.Files > 0 {
		res.Inodes.Total = st.Files
		res.Inodes.Used = st.Files - st.Ffree
	}

	return &res, nil
}
// genericVFSRenameVolume is a generic RenameVolume implementation for VFS-only drivers.
// It renames the volume's mount directory and, if present, its snapshots
// directory. If the snapshot rename fails, the volume rename is reverted.
func genericVFSRenameVolume(d Driver, vol Volume, newVolName string, op *operations.Operation) error {
	if vol.IsSnapshot() {
		return fmt.Errorf("Volume must not be a snapshot")
	}

	// Rename the volume itself.
	srcVolumePath := GetVolumeMountPath(d.Name(), vol.volType, vol.name)
	dstVolumePath := GetVolumeMountPath(d.Name(), vol.volType, newVolName)

	// revertRename is captured by the deferred closure below; it stays true
	// until every rename has succeeded so any early error return puts the
	// volume back where it was.
	revertRename := true
	if shared.PathExists(srcVolumePath) {
		err := os.Rename(srcVolumePath, dstVolumePath)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename '%s' to '%s'", srcVolumePath, dstVolumePath)
		}

		defer func() {
			if !revertRename {
				return
			}

			// Best-effort revert of the volume rename.
			os.Rename(dstVolumePath, srcVolumePath)
		}()
	}

	// And if present, the snapshots too.
	srcSnapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	dstSnapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, newVolName)

	if shared.PathExists(srcSnapshotDir) {
		err := os.Rename(srcSnapshotDir, dstSnapshotDir)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename '%s' to '%s'", srcSnapshotDir, dstSnapshotDir)
		}
	}

	// Everything renamed successfully: disarm the revert.
	revertRename = false
	return nil
}
// genericVFSVolumeSnapshots is a generic VolumeSnapshots implementation for
// VFS-only drivers: snapshots are the sub-directories of the volume's
// snapshot directory.
func genericVFSVolumeSnapshots(d Driver, vol Volume, op *operations.Operation) ([]string, error) {
	snapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)

	snapshots := []string{}
	ents, err := ioutil.ReadDir(snapshotDir)
	if err != nil {
		// If the snapshots directory doesn't exist, there are no snapshots.
		if os.IsNotExist(err) {
			return snapshots, nil
		}

		return nil, errors.Wrapf(err, "Failed to list directory '%s'", snapshotDir)
	}

	for _, ent := range ents {
		info, err := os.Stat(filepath.Join(snapshotDir, ent.Name()))
		if err != nil {
			return nil, err
		}

		// Snapshots are directories; skip anything else.
		if info.IsDir() {
			snapshots = append(snapshots, ent.Name())
		}
	}

	return snapshots, nil
}
// genericVFSRenameVolumeSnapshot is a generic RenameVolumeSnapshot
// implementation for VFS-only drivers: it renames the snapshot's mount
// directory to the new snapshot name under the same parent volume.
func genericVFSRenameVolumeSnapshot(d Driver, snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	if !snapVol.IsSnapshot() {
		return fmt.Errorf("Volume must be a snapshot")
	}

	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	oldPath := snapVol.MountPath()
	newPath := GetVolumeMountPath(d.Name(), snapVol.volType, GetSnapshotVolumeName(parentName, newSnapshotName))

	if err := os.Rename(oldPath, newPath); err != nil {
		return errors.Wrapf(err, "Failed to rename '%s' to '%s'", oldPath, newPath)
	}

	return nil
}
// genericVFSMigrateVolume is a generic MigrateVolume implementation for VFS-only drivers.
// It sends each snapshot listed in volSrcArgs.Snapshots and then the volume
// itself over conn, using rsync for filesystem data; VM block volumes
// additionally stream the raw disk image after the filesystem transfer.
func genericVFSMigrateVolume(d Driver, s *state.State, vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	bwlimit := d.Config()["rsync.bwlimit"]
	var rsyncArgs []string

	// For VM volumes, if the root volume disk path is a file image in the volume's mount path then exclude it
	// from being transferred via rsync during the filesystem volume transfer, as it will be transferred later
	// using a different method.
	if vol.IsVMBlock() {
		if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}

		diskPath, err := d.GetVolumeDiskPath(vol)
		if err != nil {
			return errors.Wrapf(err, "Error getting VM block volume disk path")
		}

		if strings.HasPrefix(diskPath, vol.MountPath()) {
			rsyncArgs = []string{"--exclude", filepath.Base(diskPath)}
		}
	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		return ErrNotSupported
	}

	// Define function to send a filesystem volume.
	sendFSVol := func(vol Volume, conn io.ReadWriteCloser, mountPath string) error {
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}

		// rsync needs the trailing slash to copy directory contents.
		path := shared.AddSlash(mountPath)
		d.Logger().Debug("Sending filesystem volume", log.Ctx{"volName": vol.name, "path": path})
		return rsync.Send(vol.name, path, conn, wrapper, volSrcArgs.MigrationType.Features, bwlimit, s.OS.ExecPath, rsyncArgs...)
	}

	// Define function to send a block volume.
	sendBlockVol := func(vol Volume, conn io.ReadWriteCloser) error {
		// Close when done to indicate to target side we are finished sending this volume.
		defer conn.Close()

		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", vol.name)
		}

		path, err := d.GetVolumeDiskPath(vol)
		if err != nil {
			return errors.Wrapf(err, "Error getting VM block volume disk path")
		}

		from, err := os.Open(path)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for reading %q", path)
		}
		defer from.Close()

		// Setup progress tracker.
		fromPipe := io.ReadCloser(from)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}

		// Stream the raw disk image over the connection.
		d.Logger().Debug("Sending block volume", log.Ctx{"volName": vol.name, "path": path})
		_, err = io.Copy(conn, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying %q to migration connection", path)
		}

		return nil
	}

	// Send all snapshots to target.
	for _, snapName := range volSrcArgs.Snapshots {
		snapshot, err := vol.NewSnapshot(snapName)
		if err != nil {
			return err
		}

		// Send snapshot to target (ensure local snapshot volume is mounted if needed).
		err = snapshot.MountTask(func(mountPath string, op *operations.Operation) error {
			err := sendFSVol(snapshot, conn, mountPath)
			if err != nil {
				return err
			}

			if vol.IsVMBlock() {
				err = sendBlockVol(snapshot, conn)
				if err != nil {
					return err
				}
			}

			return nil
		}, op)
		if err != nil {
			return err
		}
	}

	// Send volume to target (ensure local volume is mounted if needed).
	return vol.MountTask(func(mountPath string, op *operations.Operation) error {
		err := sendFSVol(vol, conn, mountPath)
		if err != nil {
			return err
		}

		if vol.IsVMBlock() {
			err = sendBlockVol(vol, conn)
			if err != nil {
				return err
			}
		}

		return nil
	}, op)
}
// genericVFSCreateVolumeFromMigration receives a volume and its snapshots over a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
// Data is read from conn in the exact order the source sends it (see genericVFSMigrateVolume):
// each snapshot's filesystem (then, for VMs, its block contents) first, the main volume last.
// All volumes/snapshots created here are reverted on failure.
func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Check migration transport type matches volume type.
	if vol.IsVMBlock() {
		if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		return ErrNotSupported
	}

	revert := revert.New()
	defer revert.Fail()

	// Create the main volume if not refreshing.
	if !volTargetArgs.Refresh {
		err := d.CreateVolume(vol, preFiller, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(vol, op) })
	}

	// recvFSVol receives a filesystem volume from conn into path via rsync.
	recvFSVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", volName)
		}

		d.Logger().Debug("Receiving filesystem volume", log.Ctx{"volName": volName, "path": path})
		return rsync.Recv(path, conn, wrapper, volTargetArgs.MigrationType.Features)
	}

	// recvBlockVol streams raw block contents from conn into the file at path.
	recvBlockVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", volName)
		}

		// No O_CREATE flag: assumes the target file/device already exists
		// (e.g. created by CreateVolume above) — confirm for refresh targets.
		to, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for writing %q", path)
		}
		defer to.Close()

		// Setup progress tracker.
		fromPipe := io.ReadCloser(conn)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}

		d.Logger().Debug("Receiving block volume", log.Ctx{"volName": volName, "path": path})
		_, err = io.Copy(to, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying from migration connection to %q", path)
		}

		return nil
	}

	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		var err error

		// Setup paths to the main volume. We will receive each snapshot to these paths and then create
		// a snapshot of the main volume for each one.
		path := shared.AddSlash(mountPath)
		pathBlock := ""

		if vol.IsVMBlock() {
			pathBlock, err = d.GetVolumeDiskPath(vol)
			if err != nil {
				return errors.Wrapf(err, "Error getting VM block volume disk path")
			}
		}

		// Snapshots are sent first by the sender, so create these first.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := GetSnapshotVolumeName(vol.name, snapName)
			snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapshotName, vol.config, vol.poolConfig)

			// Receive the filesystem snapshot first (as it is sent first).
			err = recvFSVol(snapVol.name, conn, path)
			if err != nil {
				return err
			}

			// Receive the block snapshot next (if needed).
			if vol.IsVMBlock() {
				err = recvBlockVol(snapVol.name, conn, pathBlock)
				if err != nil {
					return err
				}
			}

			// Create the snapshot itself.
			d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
			err = d.CreateVolumeSnapshot(snapVol, op)
			if err != nil {
				return err
			}

			// Setup the revert.
			revert.Add(func() {
				d.DeleteVolumeSnapshot(snapVol, op)
			})
		}

		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}

		// Receive main volume.
		err = recvFSVol(vol.name, conn, path)
		if err != nil {
			return err
		}

		// Receive the final main volume sync if needed (live migration sends a second rsync pass).
		if volTargetArgs.Live {
			d.Logger().Debug("Starting main volume final sync", log.Ctx{"volName": vol.name, "path": path})
			err = recvFSVol(vol.name, conn, path)
			if err != nil {
				return err
			}
		}

		// Run EnsureMountPath after mounting and syncing to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}

		// Receive the block volume next (if needed).
		if vol.IsVMBlock() {
			err = recvBlockVol(vol.name, conn, pathBlock)
			if err != nil {
				return err
			}
		}

		return nil
	}, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// genericVFSHasVolume is a generic HasVolume implementation for VFS-only drivers.
// It reports whether the volume's mount path exists on disk.
func genericVFSHasVolume(vol Volume) bool {
	// The boolean result of PathExists is the answer itself; no need for
	// an explicit if/else branch (staticcheck S1008).
	return shared.PathExists(vol.MountPath())
}
// genericVFSGetVolumeDiskPath is a generic GetVolumeDiskPath implementation for VFS-only drivers.
// It returns the path of the "root.img" disk image file inside the volume's mount path,
// or ErrNotSupported for volumes that are not block content type.
func genericVFSGetVolumeDiskPath(vol Volume) (string, error) {
	if vol.contentType == ContentTypeBlock {
		return filepath.Join(vol.MountPath(), "root.img"), nil
	}

	return "", ErrNotSupported
}
// genericVFSBackupVolume is a generic BackupVolume implementation for VFS-only drivers.
// It writes the volume contents (and optionally its snapshots) into tarWriter under
// "backup/..." prefixes. VM volumes get their config filesystem plus a separate raw
// "<prefix>.img" block image entry; containers get a plain filesystem copy.
func genericVFSBackupVolume(d Driver, vol Volume, tarWriter *instancewriter.InstanceTarWriter, snapshots bool, op *operations.Operation) error {
	// Define a function that can copy a volume into the backup target location.
	backupVolume := func(v Volume, prefix string) error {
		return v.MountTask(func(mountPath string, op *operations.Operation) error {
			// Reset hard link cache as we are copying a new volume (instance or snapshot).
			tarWriter.ResetHardLinkMap()

			if v.IsVMBlock() {
				blockPath, err := d.GetVolumeDiskPath(v)
				if err != nil {
					return errors.Wrapf(err, "Error getting VM block volume disk path")
				}

				var blockDiskSize int64
				var exclude []string

				if shared.IsBlockdevPath(blockPath) {
					// Get size of disk block device for tarball header.
					blockDiskSize, err = BlockDevSizeBytes(blockPath)
					if err != nil {
						return errors.Wrapf(err, "Error getting block device size %q", blockPath)
					}
				} else {
					// Get size of disk image file for tarball header.
					fi, err := os.Lstat(blockPath)
					if err != nil {
						return errors.Wrapf(err, "Error getting block file size %q", blockPath)
					}
					blockDiskSize = fi.Size()

					// Exclude the VM root disk path from the config volume backup part.
					// We will read it as a block device later instead.
					exclude = append(exclude, blockPath)
				}

				d.Logger().Debug("Copying virtual machine config volume", log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				err = filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						return err
					}

					// Skip any excluded files.
					if shared.StringInSlice(srcPath, exclude) {
						return nil
					}

					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))
					err = tarWriter.WriteFile(name, srcPath, fi, false)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}

					return nil
				})
				if err != nil {
					return err
				}

				// The block image entry is named after the prefix, e.g. "backup/virtual-machine.img".
				name := fmt.Sprintf("%s.img", prefix)
				d.Logger().Debug("Copying virtual machine block volume", log.Ctx{"sourcePath": blockPath, "file": name, "size": blockDiskSize})

				from, err := os.Open(blockPath)
				if err != nil {
					return errors.Wrapf(err, "Error opening file for reading %q", blockPath)
				}
				defer from.Close()

				fi := instancewriter.FileInfo{
					FileName:    name,
					FileSize:    blockDiskSize,
					FileMode:    0600,
					FileModTime: time.Now(),
				}

				err = tarWriter.WriteFileFromReader(from, &fi)
				if err != nil {
					return errors.Wrapf(err, "Error copying %q as %q to tarball", blockPath, name)
				}
			} else {
				d.Logger().Debug("Copying container filesystem volume", log.Ctx{"sourcePath": mountPath, "prefix": prefix})

				// NOTE(review): unlike the VM config walk above, this walk tolerates files
				// vanishing mid-export; consider whether the VM path should do the same.
				return filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							logger.Warnf("File vanished during export: %q, skipping", srcPath)
							return nil
						}

						return errors.Wrapf(err, "Error walking file during export: %q", srcPath)
					}

					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))

					// Write the file to the tarball with ignoreGrowth enabled so that if the
					// source file grows during copy we only copy up to the original size.
					// This means that the file in the tarball may be inconsistent.
					err = tarWriter.WriteFile(name, srcPath, fi, true)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}

					return nil
				})
			}

			return nil
		}, op)
	}

	// Handle snapshots.
	if snapshots {
		snapshotsPrefix := "backup/snapshots"
		if vol.IsVMBlock() {
			snapshotsPrefix = "backup/virtual-machine-snapshots"
		}

		// List the snapshots.
		snapshots, err := vol.Snapshots(op)
		if err != nil {
			return err
		}

		for _, snapshot := range snapshots {
			// Use the snapshot's short name (without the parent prefix) inside the tarball.
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name())
			prefix := filepath.Join(snapshotsPrefix, snapName)

			err := backupVolume(snapshot, prefix)
			if err != nil {
				return err
			}
		}
	}

	// Copy the main volume itself.
	prefix := "backup/container"
	if vol.IsVMBlock() {
		prefix = "backup/virtual-machine"
	}

	err := backupVolume(vol, prefix)
	if err != nil {
		return err
	}

	return nil
}
// genericVFSBackupUnpack unpacks a non-optimized backup tarball through a storage driver.
// Returns a post hook function that should be called once the database entries for the restored backup have been
// created and a revert function that can be used to undo the actions this function performs should something
// subsequently fail.
func genericVFSBackupUnpack(d Driver, vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error) {
	// Define function to unpack a volume from a backup tarball file.
	unpackVolume := func(r io.ReadSeeker, tarArgs []string, unpacker []string, srcPrefix string, mountPath string) error {
		// volTypeName is only used for log messages.
		volTypeName := "container"
		if vol.IsVMBlock() {
			volTypeName = "virtual machine"
		}

		// Clear the volume ready for unpack.
		err := wipeDirectory(mountPath)
		if err != nil {
			return errors.Wrapf(err, "Error clearing volume before unpack")
		}

		// Prepare tar arguments. Strip one path component per element of srcPrefix so the
		// extracted files land directly in mountPath.
		srcParts := strings.Split(srcPrefix, string(os.PathSeparator))
		args := append(tarArgs, []string{
			"-",
			"--xattrs-include=*",
			fmt.Sprintf("--strip-components=%d", len(srcParts)),
			"-C", mountPath, srcPrefix,
		}...)

		// Extract filesystem volume.
		// NOTE(review): this seeks the captured srcData rather than the r argument (Seek
		// error ignored); callers always pass srcData as r, so the two are the same today.
		d.Logger().Debug(fmt.Sprintf("Unpacking %s filesystem volume", volTypeName), log.Ctx{"source": srcPrefix, "target": mountPath})
		srcData.Seek(0, 0)
		err = shared.RunCommandWithFds(r, nil, "tar", args...)
		if err != nil {
			return errors.Wrapf(err, "Error starting unpack")
		}

		// Extract block file to block volume if VM.
		if vol.IsVMBlock() {
			targetPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}

			srcFile := fmt.Sprintf("%s.img", srcPrefix)

			d.Logger().Debug("Unpacking virtual machine block volume", log.Ctx{"source": srcFile, "target": targetPath})

			// Scan the tarball sequentially for the block image entry matching srcFile.
			tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), r, unpacker)
			if err != nil {
				return err
			}
			defer cancelFunc()

			for {
				hdr, err := tr.Next()
				if err == io.EOF {
					break // End of archive
				}
				if err != nil {
					return err
				}

				if hdr.Name == srcFile {
					// Open block file (use O_CREATE to support drivers that use image files).
					to, err := os.OpenFile(targetPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
					if err != nil {
						return errors.Wrapf(err, "Error opening file for writing %q", targetPath)
					}
					defer to.Close()

					// Restore original size of volume from raw block backup file size.
					d.Logger().Debug("Setting volume size from source", log.Ctx{"source": srcFile, "target": targetPath, "size": hdr.Size})

					// Allow potentially destructive resize of volume as we are going to be
					// overwriting it entirely anyway. This allows shrinking of block volumes.
					vol.allowUnsafeResize = true
					err = d.SetVolumeQuota(vol, fmt.Sprintf("%d", hdr.Size), op)
					if err != nil {
						return err
					}

					_, err = io.Copy(to, tr)
					if err != nil {
						return err
					}

					// Stop the decompressor early; the entry we wanted has been consumed.
					cancelFunc()
					return nil
				}
			}

			return fmt.Errorf("Could not find %q", srcFile)
		}

		return nil
	}

	revert := revert.New()
	defer revert.Fail()

	// Find the compression algorithm used for backup source data.
	// NOTE(review): Seek error is ignored here — confirm srcData is always seekable.
	srcData.Seek(0, 0)
	tarArgs, _, unpacker, err := shared.DetectCompressionFile(srcData)
	if err != nil {
		return nil, nil, err
	}

	// Restoring into an existing volume would clobber it, so refuse.
	if d.HasVolume(vol) {
		return nil, nil, fmt.Errorf("Cannot restore volume, already exists on target")
	}

	// Create new empty volume.
	err = d.CreateVolume(vol, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() { d.DeleteVolume(vol, op) })

	if len(snapshots) > 0 {
		// Create new snapshots directory.
		err := createParentSnapshotDirIfMissing(d.Name(), vol.volType, vol.name)
		if err != nil {
			return nil, nil, err
		}
	}

	backupSnapshotsPrefix := "backup/snapshots"
	if vol.IsVMBlock() {
		backupSnapshotsPrefix = "backup/virtual-machine-snapshots"
	}

	// Unpack each snapshot into the main volume path, then snapshot the volume,
	// recreating the original snapshot history.
	for _, snapName := range snapshots {
		err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
			backupSnapshotPrefix := fmt.Sprintf("%s/%s", backupSnapshotsPrefix, snapName)
			return unpackVolume(srcData, tarArgs, unpacker, backupSnapshotPrefix, mountPath)
		}, op)
		if err != nil {
			return nil, nil, err
		}

		snapVol, err := vol.NewSnapshot(snapName)
		if err != nil {
			return nil, nil, err
		}

		d.Logger().Debug("Creating volume snapshot", log.Ctx{"snapshotName": snapVol.Name()})
		err = d.CreateVolumeSnapshot(snapVol, op)
		if err != nil {
			return nil, nil, err
		}
		revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })
	}

	// Mount main volume and leave mounted (as is needed during backup.yaml generation during latter parts of
	// the backup restoration process).
	ourMount, err := d.MountVolume(vol, op)
	if err != nil {
		return nil, nil, err
	}

	// Create a post hook function that will be called at the end of the backup restore process to unmount
	// the volume if needed.
	postHook := func(vol Volume) error {
		if ourMount {
			d.UnmountVolume(vol, op)
		}

		return nil
	}

	backupPrefix := "backup/container"
	if vol.IsVMBlock() {
		backupPrefix = "backup/virtual-machine"
	}

	mountPath := vol.MountPath()
	err = unpackVolume(srcData, tarArgs, unpacker, backupPrefix, mountPath)
	if err != nil {
		return nil, nil, err
	}

	// Run EnsureMountPath after mounting and unpacking to ensure the mounted directory has the
	// correct permissions set.
	err = vol.EnsureMountPath()
	if err != nil {
		return nil, nil, err
	}

	revertExternal := revert.Clone() // Clone before calling revert.Success() so we can return the Fail func.
	revert.Success()
	return postHook, revertExternal.Fail, nil
}
// genericVFSResizeBlockFile resizes an existing block file to the specified size. Returns true if resize took
// place, false if not. Both requested size and existing file size are rounded to nearest block size using
// roundVolumeBlockFileSizeBytes() before the decision whether to resize is taken.
func genericVFSResizeBlockFile(filePath string, sizeBytes int64) (bool, error) {
	if sizeBytes <= 0 {
		// The guard rejects negative values as well as zero, so the message says so.
		return false, fmt.Errorf("Size cannot be zero or negative")
	}

	fi, err := os.Stat(filePath)
	if err != nil {
		return false, err
	}

	oldSizeBytes := fi.Size()

	// Round the supplied size the same way the block files are created so the comparison is accurate.
	newSizeBytes, err := roundVolumeBlockFileSizeBytes(sizeBytes)
	if err != nil {
		return false, err
	}

	if newSizeBytes < oldSizeBytes {
		return false, errors.Wrap(ErrCannotBeShrunk, "You cannot shrink block volumes")
	}

	// Nothing to do if the rounded sizes already match.
	if newSizeBytes == oldSizeBytes {
		return false, nil
	}

	// Resize block file. The unrounded requested size is passed here; presumably
	// ensureVolumeBlockFile applies the same rounding internally — confirm.
	err = ensureVolumeBlockFile(filePath, sizeBytes)
	if err != nil {
		return false, err
	}

	return true, nil
}
// genericVFSCopyVolume copies a volume and its snapshots using a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
// Each source snapshot is rsynced into the new volume's mount path and then snapshotted,
// recreating the source's snapshot history, before the main volume contents are copied.
// Volumes/snapshots created here are reverted on failure.
func genericVFSCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, srcVol Volume, srcSnapshots []Volume, refresh bool, op *operations.Operation) error {
	if vol.contentType != srcVol.contentType {
		return fmt.Errorf("Content type of source and target must be the same")
	}

	// Optional rsync bandwidth limit taken from the pool's config.
	bwlimit := d.Config()["rsync.bwlimit"]

	revert := revert.New()
	defer revert.Fail()

	// Create the main volume if not refreshing.
	if !refresh {
		err := d.CreateVolume(vol, nil, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(vol, op) })
	}

	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// If copying snapshots is indicated, check the source isn't itself a snapshot.
		if len(srcSnapshots) > 0 && !srcVol.IsSnapshot() {
			for _, srcSnapshot := range srcSnapshots {
				_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot.name)

				// Mount the source snapshot.
				err := srcSnapshot.MountTask(func(srcMountPath string, op *operations.Operation) error {
					// Copy the snapshot into the target's main mount path.
					_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
					if err != nil {
						return err
					}

					// For VM snapshots also copy the raw block device/image contents.
					if srcSnapshot.IsVMBlock() {
						srcDevPath, err := d.GetVolumeDiskPath(srcSnapshot)
						if err != nil {
							return err
						}

						targetDevPath, err := d.GetVolumeDiskPath(vol)
						if err != nil {
							return err
						}

						err = copyDevice(srcDevPath, targetDevPath)
						if err != nil {
							return err
						}
					}

					return nil
				}, op)
				if err != nil {
					return err
				}

				fullSnapName := GetSnapshotVolumeName(vol.name, snapName)
				snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapName, vol.config, vol.poolConfig)

				// Create the snapshot itself.
				d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
				err = d.CreateVolumeSnapshot(snapVol, op)
				if err != nil {
					return err
				}

				// Setup the revert.
				revert.Add(func() {
					d.DeleteVolumeSnapshot(snapVol, op)
				})
			}
		}

		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}

		// Copy source to destination (mounting each volume if needed).
		err := srcVol.MountTask(func(srcMountPath string, op *operations.Operation) error {
			_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
			if err != nil {
				return err
			}

			// For VM volumes also copy the raw block device/image contents.
			if srcVol.IsVMBlock() {
				srcDevPath, err := d.GetVolumeDiskPath(srcVol)
				if err != nil {
					return err
				}

				targetDevPath, err := d.GetVolumeDiskPath(vol)
				if err != nil {
					return err
				}

				err = copyDevice(srcDevPath, targetDevPath)
				if err != nil {
					return err
				}
			}

			return nil
		}, op)
		if err != nil {
			return err
		}

		// Run EnsureMountPath after mounting and copying to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}

		return nil
	}, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
lxd/storage/drivers/generic/vfs: Removes genericVFSResizeBlockFile
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package drivers
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/rsync"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
)
// genericVFSGetResources is a generic GetResources implementation for VFS-only drivers.
// It derives space and inode usage for the pool from filesystem statistics of the
// pool's mount path.
func genericVFSGetResources(d Driver) (*api.ResourcesStoragePool, error) {
	// Gather filesystem statistics for the pool's mount path.
	fsStat, err := shared.Statvfs(GetPoolMountPath(d.Name()))
	if err != nil {
		return nil, err
	}

	blockSize := uint64(fsStat.Bsize)

	// Populate the resources struct from the filesystem statistics.
	res := &api.ResourcesStoragePool{}
	res.Space.Total = fsStat.Blocks * blockSize
	res.Space.Used = (fsStat.Blocks - fsStat.Bfree) * blockSize

	// Filesystems such as btrfs allocate inodes dynamically and report zero
	// here, in which case the inode counts are omitted.
	if fsStat.Files > 0 {
		res.Inodes.Total = fsStat.Files
		res.Inodes.Used = fsStat.Files - fsStat.Ffree
	}

	return res, nil
}
// genericVFSRenameVolume is a generic RenameVolume implementation for VFS-only drivers.
// It renames both the volume's mount directory and (if present) its snapshots directory,
// undoing the first rename if the second one fails.
func genericVFSRenameVolume(d Driver, vol Volume, newVolName string, op *operations.Operation) error {
	if vol.IsSnapshot() {
		return fmt.Errorf("Volume must not be a snapshot")
	}

	// Rename the volume itself.
	srcVolumePath := GetVolumeMountPath(d.Name(), vol.volType, vol.name)
	dstVolumePath := GetVolumeMountPath(d.Name(), vol.volType, newVolName)

	// revertRename stays true until every rename has succeeded, so the deferred
	// function below knows whether to undo the volume rename.
	revertRename := true
	if shared.PathExists(srcVolumePath) {
		err := os.Rename(srcVolumePath, dstVolumePath)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename '%s' to '%s'", srcVolumePath, dstVolumePath)
		}

		defer func() {
			if !revertRename {
				return
			}

			// Best-effort undo; the error is intentionally ignored.
			os.Rename(dstVolumePath, srcVolumePath)
		}()
	}

	// And if present, the snapshots too.
	srcSnapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	dstSnapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, newVolName)

	if shared.PathExists(srcSnapshotDir) {
		err := os.Rename(srcSnapshotDir, dstSnapshotDir)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename '%s' to '%s'", srcSnapshotDir, dstSnapshotDir)
		}
	}

	revertRename = false
	return nil
}
// genericVFSVolumeSnapshots is a generic VolumeSnapshots implementation for VFS-only drivers.
// It returns the names of the subdirectories found in the volume's snapshots directory.
func genericVFSVolumeSnapshots(d Driver, vol Volume, op *operations.Operation) ([]string, error) {
	snapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	snapshots := []string{}

	entries, err := ioutil.ReadDir(snapshotDir)
	if err != nil {
		// A missing snapshots directory simply means there are no snapshots.
		if os.IsNotExist(err) {
			return snapshots, nil
		}

		return nil, errors.Wrapf(err, "Failed to list directory '%s'", snapshotDir)
	}

	for _, entry := range entries {
		// Stat each entry (rather than relying on the Lstat-based info from ReadDir)
		// so symlinks that point at directories are still treated as directories.
		info, err := os.Stat(filepath.Join(snapshotDir, entry.Name()))
		if err != nil {
			return nil, err
		}

		if info.IsDir() {
			snapshots = append(snapshots, entry.Name())
		}
	}

	return snapshots, nil
}
// genericVFSRenameVolumeSnapshot is a generic RenameVolumeSnapshot implementation for VFS-only drivers.
// It moves the snapshot's mount directory to the path derived from the new snapshot name.
func genericVFSRenameVolumeSnapshot(d Driver, snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	if !snapVol.IsSnapshot() {
		return fmt.Errorf("Volume must be a snapshot")
	}

	// Derive the new mount path from the parent volume name and the new snapshot name.
	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	newSnapVolName := GetSnapshotVolumeName(parentName, newSnapshotName)

	oldPath := snapVol.MountPath()
	newPath := GetVolumeMountPath(d.Name(), snapVol.volType, newSnapVolName)

	if err := os.Rename(oldPath, newPath); err != nil {
		return errors.Wrapf(err, "Failed to rename '%s' to '%s'", oldPath, newPath)
	}

	return nil
}
// genericVFSMigrateVolume is a generic MigrateVolume implementation for VFS-only drivers.
// It streams the requested snapshots first and the main volume last over conn. For each
// volume, the filesystem contents are sent via rsync and then, for VM volumes, the raw
// block contents are streamed directly. The receiving side
// (genericVFSCreateVolumeFromMigration) relies on this exact ordering.
func genericVFSMigrateVolume(d Driver, s *state.State, vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	// Optional rsync bandwidth limit taken from the pool's config.
	bwlimit := d.Config()["rsync.bwlimit"]
	var rsyncArgs []string

	// For VM volumes, if the root volume disk path is a file image in the volume's mount path then exclude it
	// from being transferred via rsync during the filesystem volume transfer, as it will be transferred later
	// using a different method.
	if vol.IsVMBlock() {
		// VM volumes require both an rsync phase and a raw block phase.
		if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}

		diskPath, err := d.GetVolumeDiskPath(vol)
		if err != nil {
			return errors.Wrapf(err, "Error getting VM block volume disk path")
		}

		if strings.HasPrefix(diskPath, vol.MountPath()) {
			rsyncArgs = []string{"--exclude", filepath.Base(diskPath)}
		}
	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		return ErrNotSupported
	}

	// Define function to send a filesystem volume (via rsync).
	sendFSVol := func(vol Volume, conn io.ReadWriteCloser, mountPath string) error {
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}

		// Ensure a trailing slash on the source path.
		path := shared.AddSlash(mountPath)
		d.Logger().Debug("Sending filesystem volume", log.Ctx{"volName": vol.name, "path": path})
		return rsync.Send(vol.name, path, conn, wrapper, volSrcArgs.MigrationType.Features, bwlimit, s.OS.ExecPath, rsyncArgs...)
	}

	// Define function to send a block volume (raw byte stream of the disk file/device).
	sendBlockVol := func(vol Volume, conn io.ReadWriteCloser) error {
		// Close when done to indicate to target side we are finished sending this volume.
		defer conn.Close()

		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", vol.name)
		}

		path, err := d.GetVolumeDiskPath(vol)
		if err != nil {
			return errors.Wrapf(err, "Error getting VM block volume disk path")
		}

		from, err := os.Open(path)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for reading %q", path)
		}
		defer from.Close()

		// Setup progress tracker.
		fromPipe := io.ReadCloser(from)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}

		d.Logger().Debug("Sending block volume", log.Ctx{"volName": vol.name, "path": path})
		_, err = io.Copy(conn, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying %q to migration connection", path)
		}

		return nil
	}

	// Send all snapshots to target.
	for _, snapName := range volSrcArgs.Snapshots {
		snapshot, err := vol.NewSnapshot(snapName)
		if err != nil {
			return err
		}

		// Send snapshot to target (ensure local snapshot volume is mounted if needed).
		err = snapshot.MountTask(func(mountPath string, op *operations.Operation) error {
			err := sendFSVol(snapshot, conn, mountPath)
			if err != nil {
				return err
			}

			// NOTE(review): IsVMBlock is checked on the parent vol, not the snapshot;
			// presumably both always share the same content type — confirm.
			if vol.IsVMBlock() {
				err = sendBlockVol(snapshot, conn)
				if err != nil {
					return err
				}
			}

			return nil
		}, op)
		if err != nil {
			return err
		}
	}

	// Send volume to target (ensure local volume is mounted if needed).
	return vol.MountTask(func(mountPath string, op *operations.Operation) error {
		err := sendFSVol(vol, conn, mountPath)
		if err != nil {
			return err
		}

		if vol.IsVMBlock() {
			err = sendBlockVol(vol, conn)
			if err != nil {
				return err
			}
		}

		return nil
	}, op)
}
// genericVFSCreateVolumeFromMigration receives a volume and its snapshots over a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
// Data is read from conn in the exact order the source sends it (see genericVFSMigrateVolume):
// each snapshot's filesystem (then, for VMs, its block contents) first, the main volume last.
// All volumes/snapshots created here are reverted on failure.
func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Check migration transport type matches volume type.
	if vol.IsVMBlock() {
		if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		return ErrNotSupported
	}

	revert := revert.New()
	defer revert.Fail()

	// Create the main volume if not refreshing.
	if !volTargetArgs.Refresh {
		err := d.CreateVolume(vol, preFiller, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(vol, op) })
	}

	// recvFSVol receives a filesystem volume from conn into path via rsync.
	recvFSVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", volName)
		}

		d.Logger().Debug("Receiving filesystem volume", log.Ctx{"volName": volName, "path": path})
		return rsync.Recv(path, conn, wrapper, volTargetArgs.MigrationType.Features)
	}

	// recvBlockVol streams raw block contents from conn into the file at path.
	recvBlockVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", volName)
		}

		// No O_CREATE flag: assumes the target file/device already exists
		// (e.g. created by CreateVolume above) — confirm for refresh targets.
		to, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for writing %q", path)
		}
		defer to.Close()

		// Setup progress tracker.
		fromPipe := io.ReadCloser(conn)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}

		d.Logger().Debug("Receiving block volume", log.Ctx{"volName": volName, "path": path})
		_, err = io.Copy(to, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying from migration connection to %q", path)
		}

		return nil
	}

	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		var err error

		// Setup paths to the main volume. We will receive each snapshot to these paths and then create
		// a snapshot of the main volume for each one.
		path := shared.AddSlash(mountPath)
		pathBlock := ""

		if vol.IsVMBlock() {
			pathBlock, err = d.GetVolumeDiskPath(vol)
			if err != nil {
				return errors.Wrapf(err, "Error getting VM block volume disk path")
			}
		}

		// Snapshots are sent first by the sender, so create these first.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := GetSnapshotVolumeName(vol.name, snapName)
			snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapshotName, vol.config, vol.poolConfig)

			// Receive the filesystem snapshot first (as it is sent first).
			err = recvFSVol(snapVol.name, conn, path)
			if err != nil {
				return err
			}

			// Receive the block snapshot next (if needed).
			if vol.IsVMBlock() {
				err = recvBlockVol(snapVol.name, conn, pathBlock)
				if err != nil {
					return err
				}
			}

			// Create the snapshot itself.
			d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
			err = d.CreateVolumeSnapshot(snapVol, op)
			if err != nil {
				return err
			}

			// Setup the revert.
			revert.Add(func() {
				d.DeleteVolumeSnapshot(snapVol, op)
			})
		}

		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}

		// Receive main volume.
		err = recvFSVol(vol.name, conn, path)
		if err != nil {
			return err
		}

		// Receive the final main volume sync if needed (live migration sends a second rsync pass).
		if volTargetArgs.Live {
			d.Logger().Debug("Starting main volume final sync", log.Ctx{"volName": vol.name, "path": path})
			err = recvFSVol(vol.name, conn, path)
			if err != nil {
				return err
			}
		}

		// Run EnsureMountPath after mounting and syncing to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}

		// Receive the block volume next (if needed).
		if vol.IsVMBlock() {
			err = recvBlockVol(vol.name, conn, pathBlock)
			if err != nil {
				return err
			}
		}

		return nil
	}, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// genericVFSHasVolume is a generic HasVolume implementation for VFS-only drivers.
// It reports whether the volume's mount path exists on disk.
func genericVFSHasVolume(vol Volume) bool {
	// The boolean result of PathExists is the answer itself; no need for
	// an explicit if/else branch (staticcheck S1008).
	return shared.PathExists(vol.MountPath())
}
// genericVFSGetVolumeDiskPath is a generic GetVolumeDiskPath implementation for VFS-only drivers.
// It returns the path of the "root.img" disk image file inside the volume's mount path,
// or ErrNotSupported for volumes that are not block content type.
func genericVFSGetVolumeDiskPath(vol Volume) (string, error) {
	if vol.contentType == ContentTypeBlock {
		return filepath.Join(vol.MountPath(), "root.img"), nil
	}

	return "", ErrNotSupported
}
// genericVFSBackupVolume is a generic BackupVolume implementation for VFS-only drivers.
// It writes the volume's contents (and, when snapshots is true, every snapshot) into
// tarWriter using the "backup/..." prefix layout. VM volumes are written as a config
// filesystem part plus a raw "<prefix>.img" block part.
func genericVFSBackupVolume(d Driver, vol Volume, tarWriter *instancewriter.InstanceTarWriter, snapshots bool, op *operations.Operation) error {
	// Define a function that can copy a volume into the backup target location.
	backupVolume := func(v Volume, prefix string) error {
		return v.MountTask(func(mountPath string, op *operations.Operation) error {
			// Reset hard link cache as we are copying a new volume (instance or snapshot).
			tarWriter.ResetHardLinkMap()

			if v.IsVMBlock() {
				blockPath, err := d.GetVolumeDiskPath(v)
				if err != nil {
					return errors.Wrapf(err, "Error getting VM block volume disk path")
				}

				var blockDiskSize int64
				var exclude []string

				if shared.IsBlockdevPath(blockPath) {
					// Get size of disk block device for tarball header.
					blockDiskSize, err = BlockDevSizeBytes(blockPath)
					if err != nil {
						return errors.Wrapf(err, "Error getting block device size %q", blockPath)
					}
				} else {
					// Get size of disk image file for tarball header.
					fi, err := os.Lstat(blockPath)
					if err != nil {
						return errors.Wrapf(err, "Error getting block file size %q", blockPath)
					}

					blockDiskSize = fi.Size()

					// Exclude the VM root disk path from the config volume backup part.
					// We will read it as a block device later instead.
					exclude = append(exclude, blockPath)
				}

				// First write the config filesystem part of the VM volume.
				d.Logger().Debug("Copying virtual machine config volume", log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				err = filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						return err
					}

					// Skip any excluded files.
					if shared.StringInSlice(srcPath, exclude) {
						return nil
					}

					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))
					err = tarWriter.WriteFile(name, srcPath, fi, false)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}

					return nil
				})
				if err != nil {
					return err
				}

				// Then stream the raw block device/image into "<prefix>.img" using the
				// size determined above for the tarball header.
				name := fmt.Sprintf("%s.img", prefix)
				d.Logger().Debug("Copying virtual machine block volume", log.Ctx{"sourcePath": blockPath, "file": name, "size": blockDiskSize})
				from, err := os.Open(blockPath)
				if err != nil {
					return errors.Wrapf(err, "Error opening file for reading %q", blockPath)
				}
				defer from.Close()

				fi := instancewriter.FileInfo{
					FileName:    name,
					FileSize:    blockDiskSize,
					FileMode:    0600,
					FileModTime: time.Now(),
				}

				err = tarWriter.WriteFileFromReader(from, &fi)
				if err != nil {
					return errors.Wrapf(err, "Error copying %q as %q to tarball", blockPath, name)
				}
			} else {
				// Non-VM volumes are a plain filesystem tree copy.
				d.Logger().Debug("Copying container filesystem volume", log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				return filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						// Tolerate files disappearing mid-walk (live source volume).
						if os.IsNotExist(err) {
							logger.Warnf("File vanished during export: %q, skipping", srcPath)
							return nil
						}

						return errors.Wrapf(err, "Error walking file during export: %q", srcPath)
					}

					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))

					// Write the file to the tarball with ignoreGrowth enabled so that if the
					// source file grows during copy we only copy up to the original size.
					// This means that the file in the tarball may be inconsistent.
					err = tarWriter.WriteFile(name, srcPath, fi, true)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}

					return nil
				})
			}

			return nil
		}, op)
	}

	// Handle snapshots first so they precede the main volume in the archive.
	if snapshots {
		snapshotsPrefix := "backup/snapshots"
		if vol.IsVMBlock() {
			snapshotsPrefix = "backup/virtual-machine-snapshots"
		}

		// List the snapshots.
		snapshots, err := vol.Snapshots(op)
		if err != nil {
			return err
		}

		for _, snapshot := range snapshots {
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name())
			prefix := filepath.Join(snapshotsPrefix, snapName)
			err := backupVolume(snapshot, prefix)
			if err != nil {
				return err
			}
		}
	}

	// Copy the main volume itself.
	prefix := "backup/container"
	if vol.IsVMBlock() {
		prefix = "backup/virtual-machine"
	}

	err := backupVolume(vol, prefix)
	if err != nil {
		return err
	}

	return nil
}
// genericVFSBackupUnpack unpacks a non-optimized backup tarball through a storage driver.
// Returns a post hook function that should be called once the database entries for the restored backup have been
// created and a revert function that can be used to undo the actions this function performs should something
// subsequently fail.
func genericVFSBackupUnpack(d Driver, vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error) {
	// Define function to unpack a volume from a backup tarball file.
	unpackVolume := func(r io.ReadSeeker, tarArgs []string, unpacker []string, srcPrefix string, mountPath string) error {
		volTypeName := "container"
		if vol.IsVMBlock() {
			volTypeName = "virtual machine"
		}

		// Clear the volume ready for unpack.
		err := wipeDirectory(mountPath)
		if err != nil {
			return errors.Wrapf(err, "Error clearing volume before unpack")
		}

		// Prepare tar arguments: read from stdin, preserve xattrs, strip the archive's
		// srcPrefix path components so files land directly in mountPath.
		srcParts := strings.Split(srcPrefix, string(os.PathSeparator))
		args := append(tarArgs, []string{
			"-",
			"--xattrs-include=*",
			fmt.Sprintf("--strip-components=%d", len(srcParts)),
			"-C", mountPath, srcPrefix,
		}...)

		// Extract filesystem volume.
		d.Logger().Debug(fmt.Sprintf("Unpacking %s filesystem volume", volTypeName), log.Ctx{"source": srcPrefix, "target": mountPath})
		// NOTE(review): this seeks the captured srcData rather than the r parameter.
		// All call sites pass srcData as r so behavior is the same, but confirm before
		// reusing unpackVolume with a different reader.
		srcData.Seek(0, 0)
		err = shared.RunCommandWithFds(r, nil, "tar", args...)
		if err != nil {
			return errors.Wrapf(err, "Error starting unpack")
		}

		// Extract block file to block volume if VM.
		if vol.IsVMBlock() {
			targetPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}

			srcFile := fmt.Sprintf("%s.img", srcPrefix)
			d.Logger().Debug("Unpacking virtual machine block volume", log.Ctx{"source": srcFile, "target": targetPath})

			// Scan the archive again for the raw block image entry.
			tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), r, unpacker)
			if err != nil {
				return err
			}
			defer cancelFunc()

			for {
				hdr, err := tr.Next()
				if err == io.EOF {
					break // End of archive
				}
				if err != nil {
					return err
				}

				if hdr.Name == srcFile {
					// Open block file (use O_CREATE to support drivers that use image files).
					to, err := os.OpenFile(targetPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
					if err != nil {
						return errors.Wrapf(err, "Error opening file for writing %q", targetPath)
					}
					defer to.Close()

					// Restore original size of volume from raw block backup file size.
					d.Logger().Debug("Setting volume size from source", log.Ctx{"source": srcFile, "target": targetPath, "size": hdr.Size})

					// Allow potentially destructive resize of volume as we are going to be
					// overwriting it entirely anyway. This allows shrinking of block volumes.
					vol.allowUnsafeResize = true
					err = d.SetVolumeQuota(vol, fmt.Sprintf("%d", hdr.Size), op)
					if err != nil {
						return err
					}

					_, err = io.Copy(to, tr)
					if err != nil {
						return err
					}

					// Stop the decompressor early; we found and copied the entry.
					cancelFunc()
					return nil
				}
			}

			return fmt.Errorf("Could not find %q", srcFile)
		}

		return nil
	}

	revert := revert.New()
	defer revert.Fail()

	// Find the compression algorithm used for backup source data.
	srcData.Seek(0, 0)
	tarArgs, _, unpacker, err := shared.DetectCompressionFile(srcData)
	if err != nil {
		return nil, nil, err
	}

	if d.HasVolume(vol) {
		return nil, nil, fmt.Errorf("Cannot restore volume, already exists on target")
	}

	// Create new empty volume.
	err = d.CreateVolume(vol, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() { d.DeleteVolume(vol, op) })

	if len(snapshots) > 0 {
		// Create new snapshots directory.
		err := createParentSnapshotDirIfMissing(d.Name(), vol.volType, vol.name)
		if err != nil {
			return nil, nil, err
		}
	}

	backupSnapshotsPrefix := "backup/snapshots"
	if vol.IsVMBlock() {
		backupSnapshotsPrefix = "backup/virtual-machine-snapshots"
	}

	// Restore each snapshot in order: unpack into the main volume, then snapshot it.
	for _, snapName := range snapshots {
		err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
			backupSnapshotPrefix := fmt.Sprintf("%s/%s", backupSnapshotsPrefix, snapName)
			return unpackVolume(srcData, tarArgs, unpacker, backupSnapshotPrefix, mountPath)
		}, op)
		if err != nil {
			return nil, nil, err
		}

		snapVol, err := vol.NewSnapshot(snapName)
		if err != nil {
			return nil, nil, err
		}

		d.Logger().Debug("Creating volume snapshot", log.Ctx{"snapshotName": snapVol.Name()})
		err = d.CreateVolumeSnapshot(snapVol, op)
		if err != nil {
			return nil, nil, err
		}
		revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })
	}

	// Mount main volume and leave mounted (as is needed during backup.yaml generation during latter parts of
	// the backup restoration process).
	ourMount, err := d.MountVolume(vol, op)
	if err != nil {
		return nil, nil, err
	}

	// Create a post hook function that will be called at the end of the backup restore process to unmount
	// the volume if needed.
	postHook := func(vol Volume) error {
		if ourMount {
			d.UnmountVolume(vol, op)
		}

		return nil
	}

	backupPrefix := "backup/container"
	if vol.IsVMBlock() {
		backupPrefix = "backup/virtual-machine"
	}

	// Unpack the main volume last (snapshots above were unpacked into the same path).
	mountPath := vol.MountPath()
	err = unpackVolume(srcData, tarArgs, unpacker, backupPrefix, mountPath)
	if err != nil {
		return nil, nil, err
	}

	// Run EnsureMountPath after mounting and unpacking to ensure the mounted directory has the
	// correct permissions set.
	err = vol.EnsureMountPath()
	if err != nil {
		return nil, nil, err
	}

	revertExternal := revert.Clone() // Clone before calling revert.Success() so we can return the Fail func.
	revert.Success()
	return postHook, revertExternal.Fail, nil
}
// genericVFSCopyVolume copies a volume and its snapshots using a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
func genericVFSCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, srcVol Volume, srcSnapshots []Volume, refresh bool, op *operations.Operation) error {
	if vol.contentType != srcVol.contentType {
		return fmt.Errorf("Content type of source and target must be the same")
	}

	bwlimit := d.Config()["rsync.bwlimit"]

	revert := revert.New()
	defer revert.Fail()

	// Create the main volume if not refreshing.
	if !refresh {
		err := d.CreateVolume(vol, nil, op)
		if err != nil {
			return err
		}
		revert.Add(func() { d.DeleteVolume(vol, op) })
	}

	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// If copying snapshots is indicated, check the source isn't itself a snapshot.
		if len(srcSnapshots) > 0 && !srcVol.IsSnapshot() {
			// Each source snapshot is rsynced into the main target volume, then a
			// snapshot of the target is taken to capture that state.
			for _, srcSnapshot := range srcSnapshots {
				_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot.name)

				// Mount the source snapshot.
				err := srcSnapshot.MountTask(func(srcMountPath string, op *operations.Operation) error {
					// Copy the snapshot.
					_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
					if err != nil {
						return err
					}

					// For VM volumes also copy the raw block device/image contents.
					if srcSnapshot.IsVMBlock() {
						srcDevPath, err := d.GetVolumeDiskPath(srcSnapshot)
						if err != nil {
							return err
						}

						targetDevPath, err := d.GetVolumeDiskPath(vol)
						if err != nil {
							return err
						}

						err = copyDevice(srcDevPath, targetDevPath)
						if err != nil {
							return err
						}
					}

					return nil
				}, op)
				if err != nil {
					return err
				}

				fullSnapName := GetSnapshotVolumeName(vol.name, snapName)
				snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapName, vol.config, vol.poolConfig)

				// Create the snapshot itself.
				d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
				err = d.CreateVolumeSnapshot(snapVol, op)
				if err != nil {
					return err
				}

				// Setup the revert.
				revert.Add(func() {
					d.DeleteVolumeSnapshot(snapVol, op)
				})
			}
		}

		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}

		// Copy source to destination (mounting each volume if needed).
		err := srcVol.MountTask(func(srcMountPath string, op *operations.Operation) error {
			_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
			if err != nil {
				return err
			}

			// For VM volumes also copy the raw block device/image contents.
			if srcVol.IsVMBlock() {
				srcDevPath, err := d.GetVolumeDiskPath(srcVol)
				if err != nil {
					return err
				}

				targetDevPath, err := d.GetVolumeDiskPath(vol)
				if err != nil {
					return err
				}

				err = copyDevice(srcDevPath, targetDevPath)
				if err != nil {
					return err
				}
			}

			return nil
		}, op)
		if err != nil {
			return err
		}

		// Run EnsureMountPath after mounting and copying to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}

		return nil
	}, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
|
package drivers
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/rsync"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
)
// genericVolumeDiskFile used to indicate the file name used for block volume disk files.
// It is joined to the volume mount path by genericVFSGetVolumeDiskPath.
const genericVolumeDiskFile = "root.img"
// genericVFSGetResources is a generic GetResources implementation for VFS-only drivers.
// It derives pool space and inode usage from a statvfs of the pool's mount path.
func genericVFSGetResources(d Driver) (*api.ResourcesStoragePool, error) {
	// Query filesystem statistics for the pool mount point.
	fsStat, err := shared.Statvfs(GetPoolMountPath(d.Name()))
	if err != nil {
		return nil, err
	}

	blockSize := uint64(fsStat.Bsize)

	// Populate the resources struct from the filesystem statistics.
	var res api.ResourcesStoragePool
	res.Space.Total = fsStat.Blocks * blockSize
	res.Space.Used = (fsStat.Blocks - fsStat.Bfree) * blockSize

	// Some filesystems don't report inodes since they allocate them
	// dynamically e.g. btrfs.
	if fsStat.Files > 0 {
		res.Inodes.Total = fsStat.Files
		res.Inodes.Used = fsStat.Files - fsStat.Ffree
	}

	return &res, nil
}
// genericVFSRenameVolume is a generic RenameVolume implementation for VFS-only drivers.
// It renames the volume's mount directory and, if present, its snapshots directory,
// registering reverts so a partial rename is undone on failure.
func genericVFSRenameVolume(d Driver, vol Volume, newVolName string, op *operations.Operation) error {
	if vol.IsSnapshot() {
		return fmt.Errorf("Volume must not be a snapshot")
	}

	revert := revert.New()
	defer revert.Fail()

	// Rename the volume itself.
	srcVolumePath := GetVolumeMountPath(d.Name(), vol.volType, vol.name)
	dstVolumePath := GetVolumeMountPath(d.Name(), vol.volType, newVolName)

	if shared.PathExists(srcVolumePath) {
		err := os.Rename(srcVolumePath, dstVolumePath)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename %q to %q", srcVolumePath, dstVolumePath)
		}
		// Undo the volume rename if the snapshot dir rename below fails.
		revert.Add(func() { os.Rename(dstVolumePath, srcVolumePath) })
	}

	// And if present, the snapshots too.
	srcSnapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	dstSnapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, newVolName)

	if shared.PathExists(srcSnapshotDir) {
		err := os.Rename(srcSnapshotDir, dstSnapshotDir)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename %q to %q", srcSnapshotDir, dstSnapshotDir)
		}
		revert.Add(func() { os.Rename(dstSnapshotDir, srcSnapshotDir) })
	}

	revert.Success()
	return nil
}
// genericVFSVolumeSnapshots is a generic VolumeSnapshots implementation for VFS-only drivers.
// It lists the snapshot names for a volume by reading the entries of its snapshots directory.
func genericVFSVolumeSnapshots(d Driver, vol Volume, op *operations.Operation) ([]string, error) {
	snapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	snapshotNames := []string{}

	entries, err := ioutil.ReadDir(snapshotDir)
	if err != nil {
		// If the snapshots directory doesn't exist, there are no snapshots.
		if os.IsNotExist(err) {
			return snapshotNames, nil
		}

		return nil, errors.Wrapf(err, "Failed to list directory '%s'", snapshotDir)
	}

	for _, entry := range entries {
		// Stat (rather than using the ReadDir entry directly) follows symlinks.
		info, err := os.Stat(filepath.Join(snapshotDir, entry.Name()))
		if err != nil {
			return nil, err
		}

		// Only directories represent snapshots.
		if info.IsDir() {
			snapshotNames = append(snapshotNames, entry.Name())
		}
	}

	return snapshotNames, nil
}
// genericVFSRenameVolumeSnapshot is a generic RenameVolumeSnapshot implementation for VFS-only drivers.
// It renames the snapshot's mount directory in place. snapVol must be a snapshot volume.
func genericVFSRenameVolumeSnapshot(d Driver, snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	if !snapVol.IsSnapshot() {
		return fmt.Errorf("Volume must be a snapshot")
	}

	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	oldPath := snapVol.MountPath()
	newPath := GetVolumeMountPath(d.Name(), snapVol.volType, GetSnapshotVolumeName(parentName, newSnapshotName))

	err := os.Rename(oldPath, newPath)
	if err != nil {
		// Use %q quoting for consistency with the other rename errors in this file
		// (previously this used '%s').
		return errors.Wrapf(err, "Failed to rename %q to %q", oldPath, newPath)
	}

	return nil
}
// genericVFSMigrateVolume is a generic MigrateVolume implementation for VFS-only drivers.
// It sends all requested snapshots first, then the main volume, over conn. Filesystem
// data goes via rsync; VM/custom block data is streamed raw afterwards.
func genericVFSMigrateVolume(d Driver, s *state.State, vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	bwlimit := d.Config()["rsync.bwlimit"]
	var rsyncArgs []string

	// For VM volumes, exclude the generic root disk image file from being transferred via rsync, as it will
	// be transferred later using a different method.
	if vol.IsVMBlock() {
		if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}

		rsyncArgs = []string{"--exclude", genericVolumeDiskFile}
	} else if vol.contentType == ContentTypeBlock && volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC || vol.contentType == ContentTypeFS && volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		// Note: && binds tighter than ||, so this rejects block volumes without
		// BLOCK_AND_RSYNC and FS volumes without RSYNC.
		return ErrNotSupported
	}

	// Define function to send a filesystem volume.
	sendFSVol := func(vol Volume, conn io.ReadWriteCloser, mountPath string) error {
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}

		path := shared.AddSlash(mountPath)
		d.Logger().Debug("Sending filesystem volume", log.Ctx{"volName": vol.name, "path": path, "bwlimit": bwlimit, "rsyncArgs": rsyncArgs})
		return rsync.Send(vol.name, path, conn, wrapper, volSrcArgs.MigrationType.Features, bwlimit, s.OS.ExecPath, rsyncArgs...)
	}

	// Define function to send a block volume.
	sendBlockVol := func(vol Volume, conn io.ReadWriteCloser) error {
		// Close when done to indicate to target side we are finished sending this volume.
		defer conn.Close()

		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", vol.name)
		}

		path, err := d.GetVolumeDiskPath(vol)
		if err != nil {
			return errors.Wrapf(err, "Error getting VM block volume disk path")
		}

		from, err := os.Open(path)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for reading %q", path)
		}
		defer from.Close()

		// Setup progress tracker.
		fromPipe := io.ReadCloser(from)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}

		d.Logger().Debug("Sending block volume", log.Ctx{"volName": vol.name, "path": path})
		_, err = io.Copy(conn, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying %q to migration connection", path)
		}

		return nil
	}

	// Send all snapshots to target.
	for _, snapName := range volSrcArgs.Snapshots {
		snapshot, err := vol.NewSnapshot(snapName)
		if err != nil {
			return err
		}

		// Send snapshot to target (ensure local snapshot volume is mounted if needed).
		err = snapshot.MountTask(func(mountPath string, op *operations.Operation) error {
			// Filesystem part is sent for everything except custom block volumes.
			if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom {
				err := sendFSVol(snapshot, conn, mountPath)
				if err != nil {
					return err
				}
			}

			// Raw block part is sent for VM volumes and custom block volumes.
			if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
				err = sendBlockVol(snapshot, conn)
				if err != nil {
					return err
				}
			}

			return nil
		}, op)
		if err != nil {
			return err
		}
	}

	// Send volume to target (ensure local volume is mounted if needed).
	return vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// Filesystem part is sent for everything except custom block volumes.
		if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom {
			err := sendFSVol(vol, conn, mountPath)
			if err != nil {
				return err
			}
		}

		// Raw block part is sent for VM volumes and custom block volumes.
		if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
			err := sendBlockVol(vol, conn)
			if err != nil {
				return err
			}
		}

		return nil
	}, op)
}
// genericVFSCreateVolumeFromMigration receives a volume and its snapshots over a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Check migration transport type matches volume type.
	if vol.contentType == ContentTypeBlock {
		if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		return ErrNotSupported
	}

	revert := revert.New()
	defer revert.Fail()

	// Create the main volume if not refreshing.
	if !volTargetArgs.Refresh {
		err := d.CreateVolume(vol, preFiller, op)
		if err != nil {
			return err
		}
		revert.Add(func() { d.DeleteVolume(vol, op) })
	}

	// Helper that receives a filesystem volume over conn via rsync into path.
	recvFSVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", volName)
		}

		d.Logger().Debug("Receiving filesystem volume", log.Ctx{"volName": volName, "path": path})
		return rsync.Recv(path, conn, wrapper, volTargetArgs.MigrationType.Features)
	}

	// Helper that receives a raw block stream over conn into the file at path.
	recvBlockVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", volName)
		}

		to, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for writing %q", path)
		}
		defer to.Close()

		// Setup progress tracker.
		fromPipe := io.ReadCloser(conn)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}

		d.Logger().Debug("Receiving block volume", log.Ctx{"volName": volName, "path": path})
		_, err = io.Copy(to, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying from migration connection to %q", path)
		}

		return nil
	}

	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		var err error

		// Setup paths to the main volume. We will receive each snapshot to these paths and then create
		// a snapshot of the main volume for each one.
		path := shared.AddSlash(mountPath)
		pathBlock := ""

		if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
			pathBlock, err = d.GetVolumeDiskPath(vol)
			if err != nil {
				return errors.Wrapf(err, "Error getting VM block volume disk path")
			}
		}

		// Snapshots are sent first by the sender, so create these first.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := GetSnapshotVolumeName(vol.name, snapName)
			snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapshotName, vol.config, vol.poolConfig)

			if snapVol.contentType != ContentTypeBlock || snapVol.volType != VolumeTypeCustom { // Receive the filesystem snapshot first (as it is sent first).
				err = recvFSVol(snapVol.name, conn, path)
				if err != nil {
					return err
				}
			}

			// Receive the block snapshot next (if needed).
			if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
				err = recvBlockVol(snapVol.name, conn, pathBlock)
				if err != nil {
					return err
				}
			}

			// Create the snapshot itself.
			d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
			err = d.CreateVolumeSnapshot(snapVol, op)
			if err != nil {
				return err
			}

			// Setup the revert.
			revert.Add(func() {
				d.DeleteVolumeSnapshot(snapVol, op)
			})
		}

		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}

		if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom {
			// Receive main volume.
			err = recvFSVol(vol.name, conn, path)
			if err != nil {
				return err
			}
		}

		// Receive the final main volume sync if needed.
		if volTargetArgs.Live && (vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom) {
			d.Logger().Debug("Starting main volume final sync", log.Ctx{"volName": vol.name, "path": path})
			err = recvFSVol(vol.name, conn, path)
			if err != nil {
				return err
			}
		}

		// Run EnsureMountPath after mounting and syncing to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}

		// Receive the block volume next (if needed).
		if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
			err = recvBlockVol(vol.name, conn, pathBlock)
			if err != nil {
				return err
			}
		}

		return nil
	}, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// genericVFSHasVolume is a generic HasVolume implementation for VFS-only drivers.
// It reports whether the volume's mount path exists on disk.
func genericVFSHasVolume(vol Volume) bool {
	// Return the predicate directly rather than via if/else (staticcheck S1008).
	return shared.PathExists(vol.MountPath())
}
// genericVFSGetVolumeDiskPath is a generic GetVolumeDiskPath implementation for VFS-only drivers.
// It returns the path of the disk image file (genericVolumeDiskFile) inside the volume's
// mount directory, or ErrNotSupported when the volume does not have block content.
func genericVFSGetVolumeDiskPath(vol Volume) (string, error) {
	// Only block content volumes carry an on-disk image file.
	if vol.contentType == ContentTypeBlock {
		return filepath.Join(vol.MountPath(), genericVolumeDiskFile), nil
	}

	return "", ErrNotSupported
}
// genericVFSBackupVolume is a generic BackupVolume implementation for VFS-only drivers.
// It writes the volume's contents (and, when snapshots is true, every snapshot) into
// tarWriter using the "backup/..." prefix layout, with prefixes varying for container,
// virtual machine and custom volumes. Block-backed volumes are written as a config
// filesystem part plus a raw "<prefix>.img" block part.
func genericVFSBackupVolume(d Driver, vol Volume, tarWriter *instancewriter.InstanceTarWriter, snapshots bool, op *operations.Operation) error {
	// Define a function that can copy a volume into the backup target location.
	backupVolume := func(v Volume, prefix string) error {
		return v.MountTask(func(mountPath string, op *operations.Operation) error {
			// Reset hard link cache as we are copying a new volume (instance or snapshot).
			tarWriter.ResetHardLinkMap()

			if v.IsVMBlock() {
				blockPath, err := d.GetVolumeDiskPath(v)
				if err != nil {
					errMsg := "Error getting VM block volume disk path"
					if vol.volType == VolumeTypeCustom {
						errMsg = "Error getting custom block volume disk path"
					}

					return errors.Wrapf(err, errMsg)
				}

				var blockDiskSize int64
				var exclude []string

				// Get size of disk block device for tarball header.
				blockDiskSize, err = BlockDiskSizeBytes(blockPath)
				if err != nil {
					return errors.Wrapf(err, "Error getting block device size %q", blockPath)
				}

				if !shared.IsBlockdevPath(blockPath) {
					// Exclude the VM root disk path from the config volume backup part.
					// We will read it as a block device later instead.
					exclude = append(exclude, blockPath)
				}

				// First write the config filesystem part of the volume.
				logMsg := "Copying virtual machine config volume"
				if vol.volType == VolumeTypeCustom {
					logMsg = "Copying custom config volume"
				}

				d.Logger().Debug(logMsg, log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				err = filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						return err
					}

					// Skip any excluded files.
					if shared.StringInSlice(srcPath, exclude) {
						return nil
					}

					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))
					err = tarWriter.WriteFile(name, srcPath, fi, false)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}

					return nil
				})
				if err != nil {
					return err
				}

				// Then stream the raw block device/image into "<prefix>.img" using the
				// size determined above for the tarball header.
				name := fmt.Sprintf("%s.img", prefix)
				logMsg = "Copying virtual machine block volume"
				if vol.volType == VolumeTypeCustom {
					logMsg = "Copying custom block volume"
				}

				d.Logger().Debug(logMsg, log.Ctx{"sourcePath": blockPath, "file": name, "size": blockDiskSize})
				from, err := os.Open(blockPath)
				if err != nil {
					return errors.Wrapf(err, "Error opening file for reading %q", blockPath)
				}
				defer from.Close()

				fi := instancewriter.FileInfo{
					FileName:    name,
					FileSize:    blockDiskSize,
					FileMode:    0600,
					FileModTime: time.Now(),
				}

				err = tarWriter.WriteFileFromReader(from, &fi)
				if err != nil {
					return errors.Wrapf(err, "Error copying %q as %q to tarball", blockPath, name)
				}
			} else {
				// Non-block volumes are a plain filesystem tree copy.
				logMsg := "Copying container filesystem volume"
				if vol.volType == VolumeTypeCustom {
					logMsg = "Copying custom filesystem volume"
				}

				d.Logger().Debug(logMsg, log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				return filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						// Tolerate files disappearing mid-walk (live source volume).
						if os.IsNotExist(err) {
							logger.Warnf("File vanished during export: %q, skipping", srcPath)
							return nil
						}

						return errors.Wrapf(err, "Error walking file during export: %q", srcPath)
					}

					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))

					// Write the file to the tarball with ignoreGrowth enabled so that if the
					// source file grows during copy we only copy up to the original size.
					// This means that the file in the tarball may be inconsistent.
					err = tarWriter.WriteFile(name, srcPath, fi, true)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}

					return nil
				})
			}

			return nil
		}, op)
	}

	// Handle snapshots first so they precede the main volume in the archive.
	if snapshots {
		snapshotsPrefix := "backup/snapshots"
		if vol.IsVMBlock() {
			snapshotsPrefix = "backup/virtual-machine-snapshots"
		} else if vol.volType == VolumeTypeCustom {
			snapshotsPrefix = "backup/volume-snapshots"
		}

		// List the snapshots.
		snapshots, err := vol.Snapshots(op)
		if err != nil {
			return err
		}

		for _, snapshot := range snapshots {
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name())
			prefix := filepath.Join(snapshotsPrefix, snapName)
			err := backupVolume(snapshot, prefix)
			if err != nil {
				return err
			}
		}
	}

	// Copy the main volume itself.
	prefix := "backup/container"
	if vol.IsVMBlock() {
		prefix = "backup/virtual-machine"
	} else if vol.volType == VolumeTypeCustom {
		prefix = "backup/volume"
	}

	err := backupVolume(vol, prefix)
	if err != nil {
		return err
	}

	return nil
}
// genericVFSBackupUnpack unpacks a non-optimized backup tarball through a storage driver.
// Returns a post hook function that should be called once the database entries for the restored backup have been
// created and a revert function that can be used to undo the actions this function performs should something
// subsequently fail. For VolumeTypeCustom volumes, a nil post hook is returned as it is expected that the DB
// record be created before the volume is unpacked due to differences in the archive format that allows this.
func genericVFSBackupUnpack(d Driver, vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error) {
	// Define function to unpack a volume from a backup tarball file.
	unpackVolume := func(r io.ReadSeeker, tarArgs []string, unpacker []string, srcPrefix string, mountPath string) error {
		// Human-readable volume type used only in the log messages below.
		volTypeName := "container"
		if vol.IsVMBlock() {
			volTypeName = "virtual machine"
		} else if vol.volType == VolumeTypeCustom {
			volTypeName = "custom"
		}
		// Clear the volume ready for unpack.
		err := wipeDirectory(mountPath)
		if err != nil {
			return errors.Wrapf(err, "Error clearing volume before unpack")
		}
		// Prepare tar arguments.
		srcParts := strings.Split(srcPrefix, string(os.PathSeparator))
		args := append(tarArgs, []string{
			"-",
			"--xattrs-include=*",
			"-C", mountPath,
		}...)
		if vol.Type() == VolumeTypeCustom {
			// If the volume type is custom, then we need to ensure that we restore the top level
			// directory's ownership from the backup. We cannot use --strip-components flag because it
			// removes the top level directory from the unpack list. Instead we use the --transform
			// flag to remove the prefix path and transform it into the "." current unpack directory.
			args = append(args, fmt.Sprintf("--transform=s/^%s/./", strings.ReplaceAll(srcPrefix, "/", `\/`)))
		} else {
			// For instance volumes, the user created files are stored in the rootfs sub-directory
			// and so strip-components flag works fine.
			args = append(args, fmt.Sprintf("--strip-components=%d", len(srcParts)))
		}
		// Directory to unpack comes after other options.
		args = append(args, srcPrefix)
		// Extract filesystem volume.
		d.Logger().Debug(fmt.Sprintf("Unpacking %s filesystem volume", volTypeName), log.Ctx{"source": srcPrefix, "target": mountPath, "args": args})
		// NOTE(review): this rewinds the captured srcData rather than the r parameter; every
		// current caller passes srcData as r so they are the same reader, but confirm before
		// adding callers that pass a different reader. The Seek error is intentionally ignored.
		srcData.Seek(0, 0)
		err = shared.RunCommandWithFds(r, nil, "tar", args...)
		if err != nil {
			return errors.Wrapf(err, "Error starting unpack")
		}
		// Extract block file to block volume if VM.
		if vol.IsVMBlock() {
			targetPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}
			srcFile := fmt.Sprintf("%s.img", srcPrefix)
			d.Logger().Debug("Unpacking virtual machine block volume", log.Ctx{"source": srcFile, "target": targetPath})
			// Scan the (possibly compressed) archive sequentially for the block image entry.
			tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), r, unpacker)
			if err != nil {
				return err
			}
			defer cancelFunc()
			for {
				hdr, err := tr.Next()
				if err == io.EOF {
					break // End of archive.
				}
				if err != nil {
					return err
				}
				if hdr.Name == srcFile {
					// Open block file (use O_CREATE to support drivers that use image files).
					to, err := os.OpenFile(targetPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
					if err != nil {
						return errors.Wrapf(err, "Error opening file for writing %q", targetPath)
					}
					defer to.Close()
					// Restore original size of volume from raw block backup file size.
					d.Logger().Debug("Setting volume size from source", log.Ctx{"source": srcFile, "target": targetPath, "size": hdr.Size})
					// Allow potentially destructive resize of volume as we are going to be
					// overwriting it entirely anyway. This allows shrinking of block volumes.
					vol.allowUnsafeResize = true
					err = d.SetVolumeQuota(vol, fmt.Sprintf("%d", hdr.Size), op)
					if err != nil {
						return err
					}
					_, err = io.Copy(to, tr)
					if err != nil {
						return err
					}
					// Stop the decompressor early; the rest of the archive is not needed.
					cancelFunc()
					return nil
				}
			}
			// The block image entry was expected for a VM volume but not found in the archive.
			return fmt.Errorf("Could not find %q", srcFile)
		}
		return nil
	}
	revert := revert.New()
	defer revert.Fail()
	// Find the compression algorithm used for backup source data.
	srcData.Seek(0, 0)
	tarArgs, _, unpacker, err := shared.DetectCompressionFile(srcData)
	if err != nil {
		return nil, nil, err
	}
	if d.HasVolume(vol) {
		return nil, nil, fmt.Errorf("Cannot restore volume, already exists on target")
	}
	// Create new empty volume.
	err = d.CreateVolume(vol, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() { d.DeleteVolume(vol, op) })
	if len(snapshots) > 0 {
		// Create new snapshots directory.
		err := createParentSnapshotDirIfMissing(d.Name(), vol.volType, vol.name)
		if err != nil {
			return nil, nil, err
		}
	}
	// Archive prefix under which snapshot data is stored depends on the volume type.
	backupSnapshotsPrefix := "backup/snapshots"
	if vol.IsVMBlock() {
		backupSnapshotsPrefix = "backup/virtual-machine-snapshots"
	} else if vol.volType == VolumeTypeCustom {
		backupSnapshotsPrefix = "backup/volume-snapshots"
	}
	// Restore each snapshot by unpacking it onto the main volume, then snapshotting that state.
	for _, snapName := range snapshots {
		err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
			backupSnapshotPrefix := fmt.Sprintf("%s/%s", backupSnapshotsPrefix, snapName)
			return unpackVolume(srcData, tarArgs, unpacker, backupSnapshotPrefix, mountPath)
		}, op)
		if err != nil {
			return nil, nil, err
		}
		snapVol, err := vol.NewSnapshot(snapName)
		if err != nil {
			return nil, nil, err
		}
		d.Logger().Debug("Creating volume snapshot", log.Ctx{"snapshotName": snapVol.Name()})
		err = d.CreateVolumeSnapshot(snapVol, op)
		if err != nil {
			return nil, nil, err
		}
		revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })
	}
	err = d.MountVolume(vol, op)
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() { d.UnmountVolume(vol, false, op) })
	// Archive prefix for the main volume data depends on the volume type.
	backupPrefix := "backup/container"
	if vol.IsVMBlock() {
		backupPrefix = "backup/virtual-machine"
	} else if vol.volType == VolumeTypeCustom {
		backupPrefix = "backup/volume"
	}
	mountPath := vol.MountPath()
	err = unpackVolume(srcData, tarArgs, unpacker, backupPrefix, mountPath)
	if err != nil {
		return nil, nil, err
	}
	// Run EnsureMountPath after mounting and unpacking to ensure the mounted directory has the
	// correct permissions set.
	err = vol.EnsureMountPath()
	if err != nil {
		return nil, nil, err
	}
	revertExternal := revert.Clone() // Clone before calling revert.Success() so we can return the Fail func.
	revert.Success()
	var postHook func(vol Volume) error
	if vol.volType != VolumeTypeCustom {
		// Leave volume mounted (as is needed during backup.yaml generation during latter parts of the
		// backup restoration process). Create a post hook function that will be called at the end of the
		// backup restore process to unmount the volume if needed.
		postHook = func(vol Volume) error {
			d.UnmountVolume(vol, false, op)
			return nil
		}
	} else {
		// For custom volumes unmount now, there is no post hook as there is no backup.yaml to generate.
		d.UnmountVolume(vol, false, op)
	}
	return postHook, revertExternal.Fail, nil
}
// genericVFSCopyVolume copies a volume and its snapshots using a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
func genericVFSCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, srcVol Volume, srcSnapshots []Volume, refresh bool, op *operations.Operation) error {
	// Source and target must hold the same content type (filesystem vs block).
	if vol.contentType != srcVol.contentType {
		return fmt.Errorf("Content type of source and target must be the same")
	}
	bwlimit := d.Config()["rsync.bwlimit"]
	revert := revert.New()
	defer revert.Fail()
	// Create the main volume if not refreshing.
	if !refresh {
		err := d.CreateVolume(vol, nil, op)
		if err != nil {
			return err
		}
		revert.Add(func() { d.DeleteVolume(vol, op) })
	}
	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// If copying snapshots is indicated, check the source isn't itself a snapshot.
		if len(srcSnapshots) > 0 && !srcVol.IsSnapshot() {
			for _, srcSnapshot := range srcSnapshots {
				_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot.name)
				// Mount the source snapshot.
				err := srcSnapshot.MountTask(func(srcMountPath string, op *operations.Operation) error {
					// Copy the snapshot's filesystem contents onto the target volume.
					_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
					if err != nil {
						return err
					}
					// For VM snapshots also copy the raw block device/image contents.
					if srcSnapshot.IsVMBlock() {
						srcDevPath, err := d.GetVolumeDiskPath(srcSnapshot)
						if err != nil {
							return err
						}
						targetDevPath, err := d.GetVolumeDiskPath(vol)
						if err != nil {
							return err
						}
						err = copyDevice(srcDevPath, targetDevPath)
						if err != nil {
							return err
						}
					}
					return nil
				}, op)
				if err != nil {
					return err
				}
				// Snapshot the target volume to capture the state just copied onto it.
				fullSnapName := GetSnapshotVolumeName(vol.name, snapName)
				snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapName, vol.config, vol.poolConfig)
				// Create the snapshot itself.
				d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
				err = d.CreateVolumeSnapshot(snapVol, op)
				if err != nil {
					return err
				}
				// Setup the revert.
				revert.Add(func() {
					d.DeleteVolumeSnapshot(snapVol, op)
				})
			}
		}
		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}
		// Copy source to destination (mounting each volume if needed).
		err := srcVol.MountTask(func(srcMountPath string, op *operations.Operation) error {
			_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
			if err != nil {
				return err
			}
			// For VM volumes also copy the raw block device/image contents.
			if srcVol.IsVMBlock() {
				srcDevPath, err := d.GetVolumeDiskPath(srcVol)
				if err != nil {
					return err
				}
				targetDevPath, err := d.GetVolumeDiskPath(vol)
				if err != nil {
					return err
				}
				err = copyDevice(srcDevPath, targetDevPath)
				if err != nil {
					return err
				}
			}
			return nil
		}, op)
		if err != nil {
			return err
		}
		// Run EnsureMountPath after mounting and copying to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}
		return nil
	}, op)
	if err != nil {
		return err
	}
	revert.Success()
	return nil
}
lxd/storage/drivers/generic: Fix VM rename with ZFS
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
package drivers
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/rsync"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
)
// genericVolumeDiskFile is the file name used for block volume disk files
// stored inside a volume's mount directory.
const genericVolumeDiskFile = "root.img"
// genericVFSGetResources is a generic GetResources implementation for VFS-only drivers.
func genericVFSGetResources(d Driver) (*api.ResourcesStoragePool, error) {
	// Query filesystem statistics for the pool's mount path.
	st, err := shared.Statvfs(GetPoolMountPath(d.Name()))
	if err != nil {
		return nil, err
	}
	// Derive space usage from the block counts.
	blockSize := uint64(st.Bsize)
	res := &api.ResourcesStoragePool{}
	res.Space.Total = st.Blocks * blockSize
	res.Space.Used = (st.Blocks - st.Bfree) * blockSize
	// Some filesystems (e.g. btrfs) allocate inodes dynamically and so
	// report no inode counts; only fill these in when available.
	if st.Files > 0 {
		res.Inodes.Total = st.Files
		res.Inodes.Used = st.Files - st.Ffree
	}
	return res, nil
}
// genericVFSRenameVolume is a generic RenameVolume implementation for VFS-only drivers.
func genericVFSRenameVolume(d Driver, vol Volume, newVolName string, op *operations.Operation) error {
	if vol.IsSnapshot() {
		return fmt.Errorf("Volume must not be a snapshot")
	}
	revert := revert.New()
	defer revert.Fail()
	// Rename the volume directory itself, if it is present on disk.
	oldVolPath := GetVolumeMountPath(d.Name(), vol.volType, vol.name)
	newVolPath := GetVolumeMountPath(d.Name(), vol.volType, newVolName)
	if shared.PathExists(oldVolPath) {
		err := os.Rename(oldVolPath, newVolPath)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename %q to %q", oldVolPath, newVolPath)
		}
		revert.Add(func() { os.Rename(newVolPath, oldVolPath) })
	}
	// Rename the snapshots directory too, if it is present on disk.
	oldSnapDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	newSnapDir := GetVolumeSnapshotDir(d.Name(), vol.volType, newVolName)
	if shared.PathExists(oldSnapDir) {
		err := os.Rename(oldSnapDir, newSnapDir)
		if err != nil {
			return errors.Wrapf(err, "Failed to rename %q to %q", oldSnapDir, newSnapDir)
		}
		revert.Add(func() { os.Rename(newSnapDir, oldSnapDir) })
	}
	revert.Success()
	return nil
}
// genericVFSVolumeSnapshots is a generic VolumeSnapshots implementation for VFS-only drivers.
func genericVFSVolumeSnapshots(d Driver, vol Volume, op *operations.Operation) ([]string, error) {
	snapshotDir := GetVolumeSnapshotDir(d.Name(), vol.volType, vol.name)
	snapshots := []string{}
	entries, err := ioutil.ReadDir(snapshotDir)
	if err != nil {
		// A missing snapshots directory simply means there are no snapshots.
		if os.IsNotExist(err) {
			return snapshots, nil
		}
		return nil, errors.Wrapf(err, "Failed to list directory '%s'", snapshotDir)
	}
	for _, entry := range entries {
		// Stat each entry (rather than using the lstat-based info from ReadDir) so
		// symlinks are resolved before the directory check.
		info, err := os.Stat(filepath.Join(snapshotDir, entry.Name()))
		if err != nil {
			return nil, err
		}
		// Only directories count as snapshots.
		if info.IsDir() {
			snapshots = append(snapshots, entry.Name())
		}
	}
	return snapshots, nil
}
// genericVFSRenameVolumeSnapshot is a generic RenameVolumeSnapshot implementation for VFS-only drivers.
func genericVFSRenameVolumeSnapshot(d Driver, snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	if !snapVol.IsSnapshot() {
		return fmt.Errorf("Volume must be a snapshot")
	}
	// Build the destination mount path from the parent volume name and the new snapshot name.
	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	srcPath := snapVol.MountPath()
	dstPath := GetVolumeMountPath(d.Name(), snapVol.volType, GetSnapshotVolumeName(parentName, newSnapshotName))
	// Nothing to do when the snapshot has no presence on disk.
	if !shared.PathExists(srcPath) {
		return nil
	}
	err := os.Rename(srcPath, dstPath)
	if err != nil {
		return errors.Wrapf(err, "Failed to rename '%s' to '%s'", srcPath, dstPath)
	}
	return nil
}
// genericVFSMigrateVolume is a generic MigrateVolume implementation for VFS-only drivers.
func genericVFSMigrateVolume(d Driver, s *state.State, vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	bwlimit := d.Config()["rsync.bwlimit"]
	var rsyncArgs []string
	// For VM volumes, exclude the generic root disk image file from being transferred via rsync, as it will
	// be transferred later using a different method.
	if vol.IsVMBlock() {
		if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}
		rsyncArgs = []string{"--exclude", genericVolumeDiskFile}
	} else if vol.contentType == ContentTypeBlock && volSrcArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC || vol.contentType == ContentTypeFS && volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		// Reject unsupported transport combinations. Note that && binds tighter than ||:
		// block content requires BLOCK_AND_RSYNC, filesystem content requires RSYNC.
		return ErrNotSupported
	}
	// Define function to send a filesystem volume.
	sendFSVol := func(vol Volume, conn io.ReadWriteCloser, mountPath string) error {
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}
		path := shared.AddSlash(mountPath)
		d.Logger().Debug("Sending filesystem volume", log.Ctx{"volName": vol.name, "path": path, "bwlimit": bwlimit, "rsyncArgs": rsyncArgs})
		return rsync.Send(vol.name, path, conn, wrapper, volSrcArgs.MigrationType.Features, bwlimit, s.OS.ExecPath, rsyncArgs...)
	}
	// Define function to send a block volume.
	sendBlockVol := func(vol Volume, conn io.ReadWriteCloser) error {
		// Close when done to indicate to target side we are finished sending this volume.
		defer conn.Close()
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", vol.name)
		}
		path, err := d.GetVolumeDiskPath(vol)
		if err != nil {
			return errors.Wrapf(err, "Error getting VM block volume disk path")
		}
		from, err := os.Open(path)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for reading %q", path)
		}
		defer from.Close()
		// Setup progress tracker.
		fromPipe := io.ReadCloser(from)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}
		d.Logger().Debug("Sending block volume", log.Ctx{"volName": vol.name, "path": path})
		_, err = io.Copy(conn, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying %q to migration connection", path)
		}
		return nil
	}
	// Send all snapshots to target.
	for _, snapName := range volSrcArgs.Snapshots {
		snapshot, err := vol.NewSnapshot(snapName)
		if err != nil {
			return err
		}
		// Send snapshot to target (ensure local snapshot volume is mounted if needed).
		err = snapshot.MountTask(func(mountPath string, op *operations.Operation) error {
			// Send the filesystem part unless this is a custom block-content volume.
			if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom {
				err := sendFSVol(snapshot, conn, mountPath)
				if err != nil {
					return err
				}
			}
			// Send the block part for VM volumes and custom block-content volumes.
			if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
				err = sendBlockVol(snapshot, conn)
				if err != nil {
					return err
				}
			}
			return nil
		}, op)
		if err != nil {
			return err
		}
	}
	// Send volume to target (ensure local volume is mounted if needed).
	return vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// Send the filesystem part unless this is a custom block-content volume.
		if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom {
			err := sendFSVol(vol, conn, mountPath)
			if err != nil {
				return err
			}
		}
		// Send the block part for VM volumes and custom block-content volumes.
		if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
			err := sendBlockVol(vol, conn)
			if err != nil {
				return err
			}
		}
		return nil
	}, op)
}
// genericVFSCreateVolumeFromMigration receives a volume and its snapshots over a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
func genericVFSCreateVolumeFromMigration(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Check migration transport type matches volume type.
	if vol.contentType == ContentTypeBlock {
		if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_BLOCK_AND_RSYNC {
			return ErrNotSupported
		}
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
		return ErrNotSupported
	}
	revert := revert.New()
	defer revert.Fail()
	// Create the main volume if not refreshing.
	if !volTargetArgs.Refresh {
		err := d.CreateVolume(vol, preFiller, op)
		if err != nil {
			return err
		}
		revert.Add(func() { d.DeleteVolume(vol, op) })
	}
	// recvFSVol receives a filesystem volume over rsync into path.
	recvFSVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", volName)
		}
		d.Logger().Debug("Receiving filesystem volume", log.Ctx{"volName": volName, "path": path})
		return rsync.Recv(path, conn, wrapper, volTargetArgs.MigrationType.Features)
	}
	// recvBlockVol streams raw block contents from conn into the file/device at path.
	recvBlockVol := func(volName string, conn io.ReadWriteCloser, path string) error {
		var wrapper *ioprogress.ProgressTracker
		if volTargetArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "block_progress", volName)
		}
		to, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
		if err != nil {
			return errors.Wrapf(err, "Error opening file for writing %q", path)
		}
		defer to.Close()
		// Setup progress tracker.
		fromPipe := io.ReadCloser(conn)
		if wrapper != nil {
			fromPipe = &ioprogress.ProgressReader{
				ReadCloser: fromPipe,
				Tracker:    wrapper,
			}
		}
		d.Logger().Debug("Receiving block volume", log.Ctx{"volName": volName, "path": path})
		_, err = io.Copy(to, fromPipe)
		if err != nil {
			return errors.Wrapf(err, "Error copying from migration connection to %q", path)
		}
		return nil
	}
	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		var err error
		// Setup paths to the main volume. We will receive each snapshot to these paths and then create
		// a snapshot of the main volume for each one.
		path := shared.AddSlash(mountPath)
		pathBlock := ""
		if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
			pathBlock, err = d.GetVolumeDiskPath(vol)
			if err != nil {
				return errors.Wrapf(err, "Error getting VM block volume disk path")
			}
		}
		// Snapshots are sent first by the sender, so create these first.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := GetSnapshotVolumeName(vol.name, snapName)
			snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapshotName, vol.config, vol.poolConfig)
			if snapVol.contentType != ContentTypeBlock || snapVol.volType != VolumeTypeCustom { // Receive the filesystem snapshot first (as it is sent first).
				err = recvFSVol(snapVol.name, conn, path)
				if err != nil {
					return err
				}
			}
			// Receive the block snapshot next (if needed).
			if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
				err = recvBlockVol(snapVol.name, conn, pathBlock)
				if err != nil {
					return err
				}
			}
			// Create the snapshot itself.
			d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
			err = d.CreateVolumeSnapshot(snapVol, op)
			if err != nil {
				return err
			}
			// Setup the revert.
			revert.Add(func() {
				d.DeleteVolumeSnapshot(snapVol, op)
			})
		}
		// Run volume-specific init logic.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}
		// Receive the main volume's filesystem part unless this is a custom block-content volume.
		if vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom {
			// Receive main volume.
			err = recvFSVol(vol.name, conn, path)
			if err != nil {
				return err
			}
		}
		// Receive the final main volume sync if needed (live migrations send a second pass).
		if volTargetArgs.Live && (vol.contentType != ContentTypeBlock || vol.volType != VolumeTypeCustom) {
			d.Logger().Debug("Starting main volume final sync", log.Ctx{"volName": vol.name, "path": path})
			err = recvFSVol(vol.name, conn, path)
			if err != nil {
				return err
			}
		}
		// Run EnsureMountPath after mounting and syncing to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}
		// Receive the block volume next (if needed).
		if vol.IsVMBlock() || vol.contentType == ContentTypeBlock && vol.volType == VolumeTypeCustom {
			err = recvBlockVol(vol.name, conn, pathBlock)
			if err != nil {
				return err
			}
		}
		return nil
	}, op)
	if err != nil {
		return err
	}
	revert.Success()
	return nil
}
// genericVFSHasVolume is a generic HasVolume implementation for VFS-only drivers.
// It reports whether the volume's mount path exists on disk.
func genericVFSHasVolume(vol Volume) bool {
	// Return the boolean directly instead of `if cond { return true }; return false`
	// (staticcheck S1008).
	return shared.PathExists(vol.MountPath())
}
// genericVFSGetVolumeDiskPath is a generic GetVolumeDiskPath implementation for VFS-only drivers.
func genericVFSGetVolumeDiskPath(vol Volume) (string, error) {
	// Only block-content volumes have a disk image file.
	if vol.contentType == ContentTypeBlock {
		return filepath.Join(vol.MountPath(), genericVolumeDiskFile), nil
	}
	return "", ErrNotSupported
}
// genericVFSBackupVolume is a generic BackupVolume implementation for VFS-only drivers.
func genericVFSBackupVolume(d Driver, vol Volume, tarWriter *instancewriter.InstanceTarWriter, snapshots bool, op *operations.Operation) error {
	// Define a function that can copy a volume into the backup target location.
	backupVolume := func(v Volume, prefix string) error {
		return v.MountTask(func(mountPath string, op *operations.Operation) error {
			// Reset hard link cache as we are copying a new volume (instance or snapshot).
			tarWriter.ResetHardLinkMap()
			if v.IsVMBlock() {
				blockPath, err := d.GetVolumeDiskPath(v)
				if err != nil {
					errMsg := "Error getting VM block volume disk path"
					if vol.volType == VolumeTypeCustom {
						errMsg = "Error getting custom block volume disk path"
					}
					// NOTE(review): non-constant format string passed to Wrapf; safe here as
					// errMsg contains no format verbs, but go vet may flag it.
					return errors.Wrapf(err, errMsg)
				}
				var blockDiskSize int64
				var exclude []string
				// Get size of disk block device for tarball header.
				blockDiskSize, err = BlockDiskSizeBytes(blockPath)
				if err != nil {
					return errors.Wrapf(err, "Error getting block device size %q", blockPath)
				}
				if !shared.IsBlockdevPath(blockPath) {
					// Exclude the VM root disk path from the config volume backup part.
					// We will read it as a block device later instead.
					exclude = append(exclude, blockPath)
				}
				logMsg := "Copying virtual machine config volume"
				if vol.volType == VolumeTypeCustom {
					logMsg = "Copying custom config volume"
				}
				d.Logger().Debug(logMsg, log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				// Walk the config/filesystem part of the volume, adding each file to the tarball.
				err = filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						return err
					}
					// Skip any excluded files.
					if shared.StringInSlice(srcPath, exclude) {
						return nil
					}
					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))
					err = tarWriter.WriteFile(name, srcPath, fi, false)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}
					return nil
				})
				if err != nil {
					return err
				}
				// Stream the raw block contents into a single "<prefix>.img" tarball entry.
				name := fmt.Sprintf("%s.img", prefix)
				logMsg = "Copying virtual machine block volume"
				if vol.volType == VolumeTypeCustom {
					logMsg = "Copying custom block volume"
				}
				d.Logger().Debug(logMsg, log.Ctx{"sourcePath": blockPath, "file": name, "size": blockDiskSize})
				from, err := os.Open(blockPath)
				if err != nil {
					return errors.Wrapf(err, "Error opening file for reading %q", blockPath)
				}
				defer from.Close()
				fi := instancewriter.FileInfo{
					FileName:    name,
					FileSize:    blockDiskSize,
					FileMode:    0600,
					FileModTime: time.Now(),
				}
				err = tarWriter.WriteFileFromReader(from, &fi)
				if err != nil {
					return errors.Wrapf(err, "Error copying %q as %q to tarball", blockPath, name)
				}
			} else {
				logMsg := "Copying container filesystem volume"
				if vol.volType == VolumeTypeCustom {
					logMsg = "Copying custom filesystem volume"
				}
				d.Logger().Debug(logMsg, log.Ctx{"sourcePath": mountPath, "prefix": prefix})
				return filepath.Walk(mountPath, func(srcPath string, fi os.FileInfo, err error) error {
					if err != nil {
						// Tolerate files disappearing mid-walk; warn and continue.
						if os.IsNotExist(err) {
							logger.Warnf("File vanished during export: %q, skipping", srcPath)
							return nil
						}
						return errors.Wrapf(err, "Error walking file during export: %q", srcPath)
					}
					name := filepath.Join(prefix, strings.TrimPrefix(srcPath, mountPath))
					// Write the file to the tarball with ignoreGrowth enabled so that if the
					// source file grows during copy we only copy up to the original size.
					// This means that the file in the tarball may be inconsistent.
					err = tarWriter.WriteFile(name, srcPath, fi, true)
					if err != nil {
						return errors.Wrapf(err, "Error adding %q as %q to tarball", srcPath, name)
					}
					return nil
				})
			}
			return nil
		}, op)
	}
	// Handle snapshots.
	if snapshots {
		// Archive prefix for snapshot data depends on the volume type.
		snapshotsPrefix := "backup/snapshots"
		if vol.IsVMBlock() {
			snapshotsPrefix = "backup/virtual-machine-snapshots"
		} else if vol.volType == VolumeTypeCustom {
			snapshotsPrefix = "backup/volume-snapshots"
		}
		// List the snapshots.
		snapshots, err := vol.Snapshots(op)
		if err != nil {
			return err
		}
		for _, snapshot := range snapshots {
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name())
			prefix := filepath.Join(snapshotsPrefix, snapName)
			err := backupVolume(snapshot, prefix)
			if err != nil {
				return err
			}
		}
	}
	// Copy the main volume itself.
	prefix := "backup/container"
	if vol.IsVMBlock() {
		prefix = "backup/virtual-machine"
	} else if vol.volType == VolumeTypeCustom {
		prefix = "backup/volume"
	}
	err := backupVolume(vol, prefix)
	if err != nil {
		return err
	}
	return nil
}
// genericVFSBackupUnpack unpacks a non-optimized backup tarball through a storage driver.
// Returns a post hook function that should be called once the database entries for the restored backup have been
// created and a revert function that can be used to undo the actions this function performs should something
// subsequently fail. For VolumeTypeCustom volumes, a nil post hook is returned as it is expected that the DB
// record be created before the volume is unpacked due to differences in the archive format that allows this.
func genericVFSBackupUnpack(d Driver, vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error) {
	// Define function to unpack a volume from a backup tarball file.
	unpackVolume := func(r io.ReadSeeker, tarArgs []string, unpacker []string, srcPrefix string, mountPath string) error {
		// Human-readable volume type used only in the log messages below.
		volTypeName := "container"
		if vol.IsVMBlock() {
			volTypeName = "virtual machine"
		} else if vol.volType == VolumeTypeCustom {
			volTypeName = "custom"
		}
		// Clear the volume ready for unpack.
		err := wipeDirectory(mountPath)
		if err != nil {
			return errors.Wrapf(err, "Error clearing volume before unpack")
		}
		// Prepare tar arguments.
		srcParts := strings.Split(srcPrefix, string(os.PathSeparator))
		args := append(tarArgs, []string{
			"-",
			"--xattrs-include=*",
			"-C", mountPath,
		}...)
		if vol.Type() == VolumeTypeCustom {
			// If the volume type is custom, then we need to ensure that we restore the top level
			// directory's ownership from the backup. We cannot use --strip-components flag because it
			// removes the top level directory from the unpack list. Instead we use the --transform
			// flag to remove the prefix path and transform it into the "." current unpack directory.
			args = append(args, fmt.Sprintf("--transform=s/^%s/./", strings.ReplaceAll(srcPrefix, "/", `\/`)))
		} else {
			// For instance volumes, the user created files are stored in the rootfs sub-directory
			// and so strip-components flag works fine.
			args = append(args, fmt.Sprintf("--strip-components=%d", len(srcParts)))
		}
		// Directory to unpack comes after other options.
		args = append(args, srcPrefix)
		// Extract filesystem volume.
		d.Logger().Debug(fmt.Sprintf("Unpacking %s filesystem volume", volTypeName), log.Ctx{"source": srcPrefix, "target": mountPath, "args": args})
		// NOTE(review): this rewinds the captured srcData rather than the r parameter; every
		// current caller passes srcData as r so they are the same reader, but confirm before
		// adding callers that pass a different reader. The Seek error is intentionally ignored.
		srcData.Seek(0, 0)
		err = shared.RunCommandWithFds(r, nil, "tar", args...)
		if err != nil {
			return errors.Wrapf(err, "Error starting unpack")
		}
		// Extract block file to block volume if VM.
		if vol.IsVMBlock() {
			targetPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}
			srcFile := fmt.Sprintf("%s.img", srcPrefix)
			d.Logger().Debug("Unpacking virtual machine block volume", log.Ctx{"source": srcFile, "target": targetPath})
			// Scan the (possibly compressed) archive sequentially for the block image entry.
			tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), r, unpacker)
			if err != nil {
				return err
			}
			defer cancelFunc()
			for {
				hdr, err := tr.Next()
				if err == io.EOF {
					break // End of archive.
				}
				if err != nil {
					return err
				}
				if hdr.Name == srcFile {
					// Open block file (use O_CREATE to support drivers that use image files).
					to, err := os.OpenFile(targetPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
					if err != nil {
						return errors.Wrapf(err, "Error opening file for writing %q", targetPath)
					}
					defer to.Close()
					// Restore original size of volume from raw block backup file size.
					d.Logger().Debug("Setting volume size from source", log.Ctx{"source": srcFile, "target": targetPath, "size": hdr.Size})
					// Allow potentially destructive resize of volume as we are going to be
					// overwriting it entirely anyway. This allows shrinking of block volumes.
					vol.allowUnsafeResize = true
					err = d.SetVolumeQuota(vol, fmt.Sprintf("%d", hdr.Size), op)
					if err != nil {
						return err
					}
					_, err = io.Copy(to, tr)
					if err != nil {
						return err
					}
					// Stop the decompressor early; the rest of the archive is not needed.
					cancelFunc()
					return nil
				}
			}
			// The block image entry was expected for a VM volume but not found in the archive.
			return fmt.Errorf("Could not find %q", srcFile)
		}
		return nil
	}
	revert := revert.New()
	defer revert.Fail()
	// Find the compression algorithm used for backup source data.
	srcData.Seek(0, 0)
	tarArgs, _, unpacker, err := shared.DetectCompressionFile(srcData)
	if err != nil {
		return nil, nil, err
	}
	if d.HasVolume(vol) {
		return nil, nil, fmt.Errorf("Cannot restore volume, already exists on target")
	}
	// Create new empty volume.
	err = d.CreateVolume(vol, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() { d.DeleteVolume(vol, op) })
	if len(snapshots) > 0 {
		// Create new snapshots directory.
		err := createParentSnapshotDirIfMissing(d.Name(), vol.volType, vol.name)
		if err != nil {
			return nil, nil, err
		}
	}
	// Archive prefix under which snapshot data is stored depends on the volume type.
	backupSnapshotsPrefix := "backup/snapshots"
	if vol.IsVMBlock() {
		backupSnapshotsPrefix = "backup/virtual-machine-snapshots"
	} else if vol.volType == VolumeTypeCustom {
		backupSnapshotsPrefix = "backup/volume-snapshots"
	}
	// Restore each snapshot by unpacking it onto the main volume, then snapshotting that state.
	for _, snapName := range snapshots {
		err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
			backupSnapshotPrefix := fmt.Sprintf("%s/%s", backupSnapshotsPrefix, snapName)
			return unpackVolume(srcData, tarArgs, unpacker, backupSnapshotPrefix, mountPath)
		}, op)
		if err != nil {
			return nil, nil, err
		}
		snapVol, err := vol.NewSnapshot(snapName)
		if err != nil {
			return nil, nil, err
		}
		d.Logger().Debug("Creating volume snapshot", log.Ctx{"snapshotName": snapVol.Name()})
		err = d.CreateVolumeSnapshot(snapVol, op)
		if err != nil {
			return nil, nil, err
		}
		revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })
	}
	err = d.MountVolume(vol, op)
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() { d.UnmountVolume(vol, false, op) })
	// Archive prefix for the main volume data depends on the volume type.
	backupPrefix := "backup/container"
	if vol.IsVMBlock() {
		backupPrefix = "backup/virtual-machine"
	} else if vol.volType == VolumeTypeCustom {
		backupPrefix = "backup/volume"
	}
	mountPath := vol.MountPath()
	err = unpackVolume(srcData, tarArgs, unpacker, backupPrefix, mountPath)
	if err != nil {
		return nil, nil, err
	}
	// Run EnsureMountPath after mounting and unpacking to ensure the mounted directory has the
	// correct permissions set.
	err = vol.EnsureMountPath()
	if err != nil {
		return nil, nil, err
	}
	revertExternal := revert.Clone() // Clone before calling revert.Success() so we can return the Fail func.
	revert.Success()
	var postHook func(vol Volume) error
	if vol.volType != VolumeTypeCustom {
		// Leave volume mounted (as is needed during backup.yaml generation during latter parts of the
		// backup restoration process). Create a post hook function that will be called at the end of the
		// backup restore process to unmount the volume if needed.
		postHook = func(vol Volume) error {
			d.UnmountVolume(vol, false, op)
			return nil
		}
	} else {
		// For custom volumes unmount now, there is no post hook as there is no backup.yaml to generate.
		d.UnmountVolume(vol, false, op)
	}
	return postHook, revertExternal.Fail, nil
}
// genericVFSCopyVolume copies a volume and its snapshots using a non-optimized method.
// initVolume is run against the main volume (not the snapshots) and is often used for quota initialization.
func genericVFSCopyVolume(d Driver, initVolume func(vol Volume) (func(), error), vol Volume, srcVol Volume, srcSnapshots []Volume, refresh bool, op *operations.Operation) error {
	// Source and target must carry the same content type (filesystem vs block).
	if vol.contentType != srcVol.contentType {
		return fmt.Errorf("Content type of source and target must be the same")
	}

	// Optional rsync bandwidth cap taken from the pool's driver config.
	bwlimit := d.Config()["rsync.bwlimit"]

	revert := revert.New()
	defer revert.Fail()

	// Create the main volume if not refreshing.
	if !refresh {
		err := d.CreateVolume(vol, nil, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(vol, op) })
	}

	// Ensure the volume is mounted.
	err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// If copying snapshots is indicated, check the source isn't itself a snapshot.
		if len(srcSnapshots) > 0 && !srcVol.IsSnapshot() {
			for _, srcSnapshot := range srcSnapshots {
				_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot.name)

				// Mount the source snapshot.
				err := srcSnapshot.MountTask(func(srcMountPath string, op *operations.Operation) error {
					// Copy the snapshot filesystem into the (still snapshot-less) target volume.
					_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
					if err != nil {
						return err
					}

					// For VM block volumes the disk image is copied separately from the
					// filesystem; the target snapshot is created from vol afterwards.
					if srcSnapshot.IsVMBlock() {
						srcDevPath, err := d.GetVolumeDiskPath(srcSnapshot)
						if err != nil {
							return err
						}

						targetDevPath, err := d.GetVolumeDiskPath(vol)
						if err != nil {
							return err
						}

						err = copyDevice(srcDevPath, targetDevPath)
						if err != nil {
							return err
						}
					}

					return nil
				}, op)
				if err != nil {
					return err
				}

				fullSnapName := GetSnapshotVolumeName(vol.name, snapName)
				snapVol := NewVolume(d, d.Name(), vol.volType, vol.contentType, fullSnapName, vol.config, vol.poolConfig)

				// Create the snapshot itself (captures the state just copied into vol).
				d.Logger().Debug("Creating snapshot", log.Ctx{"volName": snapVol.Name()})
				err = d.CreateVolumeSnapshot(snapVol, op)
				if err != nil {
					return err
				}

				// Setup the revert.
				revert.Add(func() {
					d.DeleteVolumeSnapshot(snapVol, op)
				})
			}
		}

		// Run volume-specific init logic (often quota initialization) on the main volume only.
		if initVolume != nil {
			_, err := initVolume(vol)
			if err != nil {
				return err
			}
		}

		// Copy source to destination (mounting each volume if needed).
		err := srcVol.MountTask(func(srcMountPath string, op *operations.Operation) error {
			_, err := rsync.LocalCopy(srcMountPath, mountPath, bwlimit, true)
			if err != nil {
				return err
			}

			// VM block volumes also need their disk image copied device-to-device.
			if srcVol.IsVMBlock() {
				srcDevPath, err := d.GetVolumeDiskPath(srcVol)
				if err != nil {
					return err
				}

				targetDevPath, err := d.GetVolumeDiskPath(vol)
				if err != nil {
					return err
				}

				err = copyDevice(srcDevPath, targetDevPath)
				if err != nil {
					return err
				}
			}

			return nil
		}, op)
		if err != nil {
			return err
		}

		// Run EnsureMountPath after mounting and copying to ensure the mounted directory has the
		// correct permissions set.
		err = vol.EnsureMountPath()
		if err != nil {
			return err
		}

		return nil
	}, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
|
package main
import (
"errors"
"flag"
"github.com/crowdmob/goamz/aws"
"github.com/crowdmob/goamz/cloudwatch"
mp "github.com/mackerelio/go-mackerel-plugin"
"log"
"os"
"time"
)
// graphdef declares the static Mackerel graph schema exposed by this plugin.
// Composite literals use the gofmt -s short form (element types elided).
var graphdef = map[string]mp.Graphs{
	"rds.CPUUtilization": {
		Label: "RDS CPU Utilization",
		Unit:  "percentage",
		Metrics: []mp.Metrics{
			{Name: "CPUUtilization", Label: "CPUUtilization"},
		},
	},
	"rds.DatabaseConnections": {
		Label: "RDS Database Connections",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "DatabaseConnections", Label: "DatabaseConnections"},
		},
	},
	"rds.FreeableMemory": {
		Label: "RDS Freeable Memory",
		Unit:  "bytes",
		Metrics: []mp.Metrics{
			{Name: "FreeableMemory", Label: "FreeableMemory"},
		},
	},
	"rds.FreeStorageSpace": {
		Label: "RDS Free Storage Space",
		Unit:  "bytes",
		Metrics: []mp.Metrics{
			{Name: "FreeStorageSpace", Label: "FreeStorageSpace"},
		},
	},
	"rds.ReplicaLag": {
		Label: "RDS Replica Lag",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "ReplicaLag", Label: "ReplicaLag"},
		},
	},
	"rds.SwapUsage": {
		Label: "RDS Swap Usage",
		Unit:  "bytes",
		Metrics: []mp.Metrics{
			{Name: "SwapUsage", Label: "SwapUsage"},
		},
	},
	"rds.IOPS": {
		Label: "RDS IOPS",
		Unit:  "iops",
		Metrics: []mp.Metrics{
			{Name: "ReadIOPS", Label: "Read"},
			{Name: "WriteIOPS", Label: "Write"},
		},
	},
	"rds.Latency": {
		Label: "RDS Latency in second",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "ReadLatency", Label: "Read"},
			{Name: "WriteLatency", Label: "Write"},
		},
	},
}
// RDSPlugin holds the AWS credentials and target instance for one metric run.
type RDSPlugin struct {
	Region          string // AWS region of the DB instance
	AccessKeyId     string // may be empty; see aws.GetAuth fallback behavior
	SecretAccessKey string
	Identifier      string // DBInstanceIdentifier dimension value
}

// GetLastPoint fetches the most recent CloudWatch datapoint (Average statistic)
// for metricName within the last 3 minutes, scoped to the given dimension.
func GetLastPoint(cloudWatch *cloudwatch.CloudWatch, dimension *cloudwatch.Dimension, metricName string) (float64, error) {
	now := time.Now()

	response, err := cloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsRequest{
		Dimensions: []cloudwatch.Dimension{*dimension},
		StartTime:  now.Add(time.Duration(180) * time.Second * -1), // 3 min (to fetch at least 1 data-point)
		EndTime:    now,
		MetricName: metricName,
		Period:     60,
		Statistics: []string{"Average"},
		Namespace:  "AWS/RDS",
	})
	if err != nil {
		return 0, err
	}

	datapoints := response.GetMetricStatisticsResult.Datapoints
	if len(datapoints) == 0 {
		return 0, errors.New("fetched no datapoints")
	}

	// Pick the datapoint with the newest timestamp; CloudWatch does not
	// guarantee any ordering of the returned datapoints.
	latest := time.Unix(0, 0)
	var latestVal float64
	for _, dp := range datapoints {
		if dp.Timestamp.Before(latest) {
			continue
		}
		latest = dp.Timestamp
		latestVal = dp.Average
	}

	return latestVal, nil
}
// FetchMetrics collects the latest datapoint of each tracked RDS CloudWatch
// metric and returns them keyed by metric name.
func (p RDSPlugin) FetchMetrics() (map[string]float64, error) {
	// NOTE(review): empty key/secret presumably fall back to env/instance-role
	// credentials inside goamz — confirm against aws.GetAuth docs.
	auth, err := aws.GetAuth(p.AccessKeyId, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	cloudWatch, err := cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return nil, err
	}

	stat := make(map[string]float64)

	// All queries are scoped to this single DB instance.
	perInstance := &cloudwatch.Dimension{
		Name:  "DBInstanceIdentifier",
		Value: p.Identifier,
	}

	for _, met := range [...]string{
		"BinLogDiskUsage", "CPUUtilization", "DatabaseConnections", "DiskQueueDepth", "FreeableMemory",
		"FreeStorageSpace", "ReplicaLag", "SwapUsage", "ReadIOPS", "WriteIOPS", "ReadLatency",
		"WriteLatency",
	} {
		v, err := GetLastPoint(cloudWatch, perInstance, met)
		if err == nil {
			stat[met] = v
		} else {
			// Best effort: a metric that cannot be fetched is logged and
			// skipped rather than failing the entire collection.
			log.Printf("%s: %s", met, err)
		}
	}

	return stat, nil
}
// GraphDefinition returns the static graph schema declared in graphdef.
func (p RDSPlugin) GraphDefinition() map[string](mp.Graphs) {
	return graphdef
}

// main parses command-line flags into an RDSPlugin and hands it to the
// mackerel plugin helper, which prints either graph definitions or values.
func main() {
	optRegion := flag.String("region", "", "AWS Region")
	optAccessKeyId := flag.String("access-key-id", "", "AWS Access Key ID")
	optSecretAccessKey := flag.String("secret-access-key", "", "AWS Secret Access Key")
	optIdentifier := flag.String("identifier", "", "DB Instance Identifier")
	optTempfile := flag.String("tempfile", "", "Temp file name")
	flag.Parse()

	var rds RDSPlugin

	// With no explicit region, detect it from EC2 instance metadata.
	if *optRegion == "" {
		rds.Region = aws.InstanceRegion()
	} else {
		rds.Region = *optRegion
	}

	rds.Identifier = *optIdentifier
	rds.AccessKeyId = *optAccessKeyId
	rds.SecretAccessKey = *optSecretAccessKey

	helper := mp.NewMackerelPlugin(rds)

	if *optTempfile != "" {
		helper.Tempfile = *optTempfile
	} else {
		helper.Tempfile = "/tmp/mackerel-plugin-rds"
	}

	// mackerel-agent sets this env var when it wants the graph schema
	// instead of metric values.
	if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
		helper.OutputDefinitions()
	} else {
		helper.OutputValues()
	}
}
Add Throughput and Network Throughput metrics
package main
import (
"errors"
"flag"
"log"
"os"
"time"
"github.com/crowdmob/goamz/aws"
"github.com/crowdmob/goamz/cloudwatch"
mp "github.com/mackerelio/go-mackerel-plugin"
)
// graphdef declares the static Mackerel graph schema exposed by this plugin,
// including the disk and network throughput graphs. Composite literals use
// the gofmt -s short form (element types elided).
var graphdef = map[string]mp.Graphs{
	"rds.CPUUtilization": {
		Label: "RDS CPU Utilization",
		Unit:  "percentage",
		Metrics: []mp.Metrics{
			{Name: "CPUUtilization", Label: "CPUUtilization"},
		},
	},
	"rds.DatabaseConnections": {
		Label: "RDS Database Connections",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "DatabaseConnections", Label: "DatabaseConnections"},
		},
	},
	"rds.FreeableMemory": {
		Label: "RDS Freeable Memory",
		Unit:  "bytes",
		Metrics: []mp.Metrics{
			{Name: "FreeableMemory", Label: "FreeableMemory"},
		},
	},
	"rds.FreeStorageSpace": {
		Label: "RDS Free Storage Space",
		Unit:  "bytes",
		Metrics: []mp.Metrics{
			{Name: "FreeStorageSpace", Label: "FreeStorageSpace"},
		},
	},
	"rds.ReplicaLag": {
		Label: "RDS Replica Lag",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "ReplicaLag", Label: "ReplicaLag"},
		},
	},
	"rds.SwapUsage": {
		Label: "RDS Swap Usage",
		Unit:  "bytes",
		Metrics: []mp.Metrics{
			{Name: "SwapUsage", Label: "SwapUsage"},
		},
	},
	"rds.IOPS": {
		Label: "RDS IOPS",
		Unit:  "iops",
		Metrics: []mp.Metrics{
			{Name: "ReadIOPS", Label: "Read"},
			{Name: "WriteIOPS", Label: "Write"},
		},
	},
	"rds.Latency": {
		Label: "RDS Latency in second",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "ReadLatency", Label: "Read"},
			{Name: "WriteLatency", Label: "Write"},
		},
	},
	"rds.Throughput": {
		Label: "RDS Throughput",
		Unit:  "bytes/sec",
		Metrics: []mp.Metrics{
			{Name: "ReadThroughput", Label: "Read"},
			{Name: "WriteThroughput", Label: "Write"},
		},
	},
	"rds.NetworkThroughput": {
		Label: "RDS Network Throughput",
		Unit:  "bytes/sec",
		Metrics: []mp.Metrics{
			{Name: "NetworkTransmitThroughput", Label: "Transmit"},
			{Name: "NetworkReceiveThroughput", Label: "Receive"},
		},
	},
}
// RDSPlugin holds the AWS credentials and target instance for one metric run.
type RDSPlugin struct {
	Region          string // AWS region of the DB instance
	AccessKeyId     string // may be empty; see aws.GetAuth fallback behavior
	SecretAccessKey string
	Identifier      string // DBInstanceIdentifier dimension value
}

// GetLastPoint fetches the most recent CloudWatch datapoint (Average statistic)
// for metricName within the last 3 minutes, scoped to the given dimension.
func GetLastPoint(cloudWatch *cloudwatch.CloudWatch, dimension *cloudwatch.Dimension, metricName string) (float64, error) {
	now := time.Now()

	response, err := cloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsRequest{
		Dimensions: []cloudwatch.Dimension{*dimension},
		StartTime:  now.Add(time.Duration(180) * time.Second * -1), // 3 min (to fetch at least 1 data-point)
		EndTime:    now,
		MetricName: metricName,
		Period:     60,
		Statistics: []string{"Average"},
		Namespace:  "AWS/RDS",
	})
	if err != nil {
		return 0, err
	}

	datapoints := response.GetMetricStatisticsResult.Datapoints
	if len(datapoints) == 0 {
		return 0, errors.New("fetched no datapoints")
	}

	// Pick the datapoint with the newest timestamp; CloudWatch does not
	// guarantee any ordering of the returned datapoints.
	latest := time.Unix(0, 0)
	var latestVal float64
	for _, dp := range datapoints {
		if dp.Timestamp.Before(latest) {
			continue
		}
		latest = dp.Timestamp
		latestVal = dp.Average
	}

	return latestVal, nil
}
// FetchMetrics collects the latest datapoint of each tracked RDS CloudWatch
// metric (including disk/network throughput) keyed by metric name.
func (p RDSPlugin) FetchMetrics() (map[string]float64, error) {
	// NOTE(review): empty key/secret presumably fall back to env/instance-role
	// credentials inside goamz — confirm against aws.GetAuth docs.
	auth, err := aws.GetAuth(p.AccessKeyId, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	cloudWatch, err := cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return nil, err
	}

	stat := make(map[string]float64)

	// All queries are scoped to this single DB instance.
	perInstance := &cloudwatch.Dimension{
		Name:  "DBInstanceIdentifier",
		Value: p.Identifier,
	}

	for _, met := range [...]string{
		"BinLogDiskUsage", "CPUUtilization", "DatabaseConnections", "DiskQueueDepth", "FreeableMemory",
		"FreeStorageSpace", "ReplicaLag", "SwapUsage", "ReadIOPS", "WriteIOPS", "ReadLatency",
		"WriteLatency", "ReadThroughput", "WriteThroughput", "NetworkTransmitThroughput", "NetworkReceiveThroughput",
	} {
		v, err := GetLastPoint(cloudWatch, perInstance, met)
		if err == nil {
			stat[met] = v
		} else {
			// Best effort: a metric that cannot be fetched is logged and
			// skipped rather than failing the entire collection.
			log.Printf("%s: %s", met, err)
		}
	}

	return stat, nil
}
// GraphDefinition returns the static graph schema declared in graphdef.
func (p RDSPlugin) GraphDefinition() map[string](mp.Graphs) {
	return graphdef
}

// main parses command-line flags into an RDSPlugin and hands it to the
// mackerel plugin helper, which prints either graph definitions or values.
func main() {
	optRegion := flag.String("region", "", "AWS Region")
	optAccessKeyId := flag.String("access-key-id", "", "AWS Access Key ID")
	optSecretAccessKey := flag.String("secret-access-key", "", "AWS Secret Access Key")
	optIdentifier := flag.String("identifier", "", "DB Instance Identifier")
	optTempfile := flag.String("tempfile", "", "Temp file name")
	flag.Parse()

	var rds RDSPlugin

	// With no explicit region, detect it from EC2 instance metadata.
	if *optRegion == "" {
		rds.Region = aws.InstanceRegion()
	} else {
		rds.Region = *optRegion
	}

	rds.Identifier = *optIdentifier
	rds.AccessKeyId = *optAccessKeyId
	rds.SecretAccessKey = *optSecretAccessKey

	helper := mp.NewMackerelPlugin(rds)

	if *optTempfile != "" {
		helper.Tempfile = *optTempfile
	} else {
		helper.Tempfile = "/tmp/mackerel-plugin-rds"
	}

	// mackerel-agent sets this env var when it wants the graph schema
	// instead of metric values.
	if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
		helper.OutputDefinitions()
	} else {
		helper.OutputValues()
	}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
)
// DEFAULT_MAX_LENGTH bounds the size (bytes) of an incoming frame so a corrupt
// or hostile peer cannot make the process allocate an arbitrarily large buffer.
const DEFAULT_MAX_LENGTH = 16384000

// TFramedTransport wraps another TTransport and exchanges messages as
// length-prefixed frames: a 4-byte big-endian size followed by the payload.
type TFramedTransport struct {
	transport TTransport
	buf       bytes.Buffer // write-side buffer; emitted as a single frame by Flush
	reader    *bufio.Reader
	frameSize int //Current remaining size of the frame. if ==0 read next frame header
	buffer    [4]byte // scratch space for reading/writing the 4-byte frame header
	maxLength int     // largest frame size accepted by readFrameHeader
}

// tFramedTransportFactory wraps another factory so that every transport it
// produces is framed with the configured maximum frame length.
type tFramedTransportFactory struct {
	factory   TTransportFactory
	maxLength int
}

// NewTFramedTransportFactory returns a factory producing framed transports
// using the default maximum frame length.
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
	return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
}

// GetTransport frames the transport produced by the wrapped factory.
func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
	return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
}

// NewTFramedTransport frames the given transport with the default max length.
func NewTFramedTransport(transport TTransport) *TFramedTransport {
	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
}

// NewTFramedTransportMaxLength frames the given transport, rejecting incoming
// frames larger than maxLength.
func NewTFramedTransportMaxLength(transport TTransport, maxLength int) *TFramedTransport {
	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
}

// Open opens the underlying transport.
func (p *TFramedTransport) Open() error {
	return p.transport.Open()
}

// IsOpen reports whether the underlying transport is open.
func (p *TFramedTransport) IsOpen() bool {
	return p.transport.IsOpen()
}

// Close closes the underlying transport.
func (p *TFramedTransport) Close() error {
	return p.transport.Close()
}
// Read reads up to len(buf) bytes from the current frame, fetching the next
// frame header first when the previous frame has been fully consumed.
//
// BUG FIX (THRIFT-2839): the old code returned an error with zero bytes read
// whenever len(buf) exceeded the bytes remaining in the frame, losing the tail
// of every frame that did not align with the caller's buffer size. Now the
// remaining frame bytes are read into buf and the transport error is returned
// alongside the partial count (also fixes the "enought" typo).
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
	if p.frameSize == 0 {
		p.frameSize, err = p.readFrameHeader()
		if err != nil {
			return
		}
	}
	if p.frameSize < len(buf) {
		// Serve the remainder of the frame, then report the short read.
		frameSize := p.frameSize
		tmp := make([]byte, p.frameSize)
		l, err = p.Read(tmp)
		copy(buf, tmp)
		if err == nil {
			err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
			return
		}
	}
	got, err := p.reader.Read(buf)
	p.frameSize = p.frameSize - got
	//sanity check
	if p.frameSize < 0 {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
	}
	return got, NewTTransportExceptionFromError(err)
}
// ReadByte reads a single byte from the current frame, fetching the next
// frame header first when the previous frame has been fully consumed.
// Fixes the "enought" typo in the error message (consistent with Read).
func (p *TFramedTransport) ReadByte() (c byte, err error) {
	if p.frameSize == 0 {
		p.frameSize, err = p.readFrameHeader()
		if err != nil {
			return
		}
	}
	if p.frameSize < 1 {
		return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
	}
	c, err = p.reader.ReadByte()
	if err == nil {
		// Only consume frame budget for a byte actually delivered.
		p.frameSize--
	}
	return
}
// Write appends buf to the in-memory write buffer; nothing reaches the
// underlying transport until Flush emits the buffered bytes as one frame.
func (p *TFramedTransport) Write(buf []byte) (int, error) {
	n, err := p.buf.Write(buf)
	return n, NewTTransportExceptionFromError(err)
}

// WriteByte appends a single byte to the in-memory write buffer.
func (p *TFramedTransport) WriteByte(c byte) error {
	return p.buf.WriteByte(c)
}

// WriteString appends s to the in-memory write buffer.
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
	return p.buf.WriteString(s)
}
// Flush writes the buffered bytes to the underlying transport as one frame:
// a 4-byte big-endian length header followed by the payload, then flushes the
// underlying transport. An empty buffer still emits a zero-length header.
func (p *TFramedTransport) Flush() error {
	size := p.buf.Len()
	buf := p.buffer[:4] // reuse the scratch header buffer
	binary.BigEndian.PutUint32(buf, uint32(size))
	_, err := p.transport.Write(buf)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	if size > 0 {
		// WriteTo drains p.buf into the transport, resetting it for reuse.
		// NOTE(review): the bare builtin print goes to stderr without
		// timestamps; consider routing through a proper logger.
		if n, err := p.buf.WriteTo(p.transport); err != nil {
			print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
			return NewTTransportExceptionFromError(err)
		}
	}
	err = p.transport.Flush()
	return NewTTransportExceptionFromError(err)
}
// readFrameHeader reads the 4-byte big-endian frame length from the stream
// and validates it against maxLength; it does not consume any payload bytes.
func (p *TFramedTransport) readFrameHeader() (int, error) {
	buf := p.buffer[:4]
	if _, err := io.ReadFull(p.reader, buf); err != nil {
		return 0, err
	}
	size := int(binary.BigEndian.Uint32(buf))
	// size < 0 can only occur on platforms where int is 32 bits and the
	// wire value has the high bit set.
	if size < 0 || size > p.maxLength {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
	}
	return size, nil
}
THRIFT-2839 TFramedTransport read bug
Client: Go
Patch: Chi Vinh Le <9389641e8955f81b0fa5c713fd37607ba1392817@chinet.info>
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
)
// DEFAULT_MAX_LENGTH bounds the size (bytes) of an incoming frame so a corrupt
// or hostile peer cannot make the process allocate an arbitrarily large buffer.
const DEFAULT_MAX_LENGTH = 16384000

// TFramedTransport wraps another TTransport and exchanges messages as
// length-prefixed frames: a 4-byte big-endian size followed by the payload.
type TFramedTransport struct {
	transport TTransport
	buf       bytes.Buffer // write-side buffer; emitted as a single frame by Flush
	reader    *bufio.Reader
	frameSize int //Current remaining size of the frame. if ==0 read next frame header
	buffer    [4]byte // scratch space for reading/writing the 4-byte frame header
	maxLength int     // largest frame size accepted by readFrameHeader
}

// tFramedTransportFactory wraps another factory so that every transport it
// produces is framed with the configured maximum frame length.
type tFramedTransportFactory struct {
	factory   TTransportFactory
	maxLength int
}

// NewTFramedTransportFactory returns a factory producing framed transports
// using the default maximum frame length.
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
	return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
}

// GetTransport frames the transport produced by the wrapped factory.
func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
	return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
}

// NewTFramedTransport frames the given transport with the default max length.
func NewTFramedTransport(transport TTransport) *TFramedTransport {
	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
}

// NewTFramedTransportMaxLength frames the given transport, rejecting incoming
// frames larger than maxLength.
func NewTFramedTransportMaxLength(transport TTransport, maxLength int) *TFramedTransport {
	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
}

// Open opens the underlying transport.
func (p *TFramedTransport) Open() error {
	return p.transport.Open()
}

// IsOpen reports whether the underlying transport is open.
func (p *TFramedTransport) IsOpen() bool {
	return p.transport.IsOpen()
}

// Close closes the underlying transport.
func (p *TFramedTransport) Close() error {
	return p.transport.Close()
}
// Read reads up to len(buf) bytes from the current frame, fetching the next
// frame header first when the previous frame has been fully consumed
// (THRIFT-2839 behavior: a request larger than the remaining frame delivers
// the remaining bytes plus a transport error, instead of failing with none).
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
	if p.frameSize == 0 {
		p.frameSize, err = p.readFrameHeader()
		if err != nil {
			return
		}
	}
	if p.frameSize < len(buf) {
		// Save the size before the recursive call, which mutates p.frameSize.
		frameSize := p.frameSize
		tmp := make([]byte, p.frameSize)
		// Recurse with a buffer that exactly fits the frame remainder, then
		// copy what was read into the caller's buffer.
		l, err = p.Read(tmp)
		copy(buf, tmp)
		if err == nil {
			err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
			return
		}
		// NOTE(review): when the recursive read itself errors, control falls
		// through to the reader.Read below with the full-length buf — confirm
		// this matches upstream intent.
	}
	got, err := p.reader.Read(buf)
	p.frameSize = p.frameSize - got
	//sanity check
	if p.frameSize < 0 {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
	}
	return got, NewTTransportExceptionFromError(err)
}
// ReadByte reads a single byte from the current frame, fetching the next
// frame header first when the previous frame has been fully consumed.
func (p *TFramedTransport) ReadByte() (c byte, err error) {
	if p.frameSize == 0 {
		p.frameSize, err = p.readFrameHeader()
		if err != nil {
			return
		}
	}
	if p.frameSize < 1 {
		return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
	}
	c, err = p.reader.ReadByte()
	if err == nil {
		// Only consume frame budget for a byte actually delivered.
		p.frameSize--
	}
	return
}
// Write appends buf to the in-memory write buffer; nothing reaches the
// underlying transport until Flush emits the buffered bytes as one frame.
func (p *TFramedTransport) Write(buf []byte) (int, error) {
	n, err := p.buf.Write(buf)
	return n, NewTTransportExceptionFromError(err)
}

// WriteByte appends a single byte to the in-memory write buffer.
func (p *TFramedTransport) WriteByte(c byte) error {
	return p.buf.WriteByte(c)
}

// WriteString appends s to the in-memory write buffer.
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
	return p.buf.WriteString(s)
}

// Flush writes the buffered bytes to the underlying transport as one frame:
// a 4-byte big-endian length header followed by the payload, then flushes the
// underlying transport. An empty buffer still emits a zero-length header.
func (p *TFramedTransport) Flush() error {
	size := p.buf.Len()
	buf := p.buffer[:4] // reuse the scratch header buffer
	binary.BigEndian.PutUint32(buf, uint32(size))
	_, err := p.transport.Write(buf)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	if size > 0 {
		// WriteTo drains p.buf into the transport, resetting it for reuse.
		if n, err := p.buf.WriteTo(p.transport); err != nil {
			print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
			return NewTTransportExceptionFromError(err)
		}
	}
	err = p.transport.Flush()
	return NewTTransportExceptionFromError(err)
}
// readFrameHeader reads the 4-byte big-endian frame length from the stream
// and validates it against maxLength; it does not consume any payload bytes.
func (p *TFramedTransport) readFrameHeader() (int, error) {
	buf := p.buffer[:4]
	if _, err := io.ReadFull(p.reader, buf); err != nil {
		return 0, err
	}
	size := int(binary.BigEndian.Uint32(buf))
	// size < 0 can only occur on platforms where int is 32 bits and the
	// wire value has the high bit set.
	if size < 0 || size > p.maxLength {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
	}
	return size, nil
}
|
package printer
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/appneta/msgp/gen"
"github.com/appneta/msgp/parse"
"github.com/ttacon/chalk"
"golang.org/x/tools/imports"
)
// infof prints a printf-style status message to stdout in magenta.
func infof(s string, v ...interface{}) {
	fmt.Printf(chalk.Magenta.Color(s), v...)
}
// PrintFile prints the methods for the provided list
// of elements to the given file name and canonical
// package path.
func PrintFile(file string, f *parse.FileSet, mode gen.Method) error {
	out, tests, err := generate(f, mode)
	if err != nil {
		return err
	}

	// goimports runs on the main file in a background goroutine while the
	// test file (if any) is formatted here. Empirically this costs about the
	// same as serial execution when GOMAXPROCS=1, and is faster otherwise.
	res := goformat(file, out.Bytes())

	if tests != nil {
		testfile := strings.TrimSuffix(file, ".go") + "_test.go"
		if err := format(testfile, tests.Bytes()); err != nil {
			return err
		}
		infof(">>> Wrote and formatted \"%s\"\n", testfile)
	}

	// Wait for the background goroutine and surface its result directly.
	return <-res
}
// format runs goimports processing on data and writes the result to file
// with owner-only read/write permissions.
func format(file string, data []byte) error {
	out, err := imports.Process(file, data, nil)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(file, out, 0600)
}
// goformat runs format(file, data) in a background goroutine and returns a
// buffered channel that yields the result exactly once.
//
// BUG FIX: the old version printed the ">>> Wrote and formatted" success
// message unconditionally — even when format failed — and only after sending
// on the channel, so the caller could consume the error and return before the
// message (mis)reported success. The message is now printed only on success,
// before the result is sent.
func goformat(file string, data []byte) <-chan error {
	out := make(chan error, 1)
	go func(file string, data []byte, end chan error) {
		err := format(file, data)
		if err == nil {
			infof(">>> Wrote and formatted \"%s\"\n", file)
		}
		end <- err
	}(file, data, out)
	return out
}
// generate renders the code for f into a main buffer and, when the Test bit
// of mode is set, a companion test buffer (nil otherwise). Both buffers get
// a package header and the import block they will need.
func generate(f *parse.FileSet, mode gen.Method) (*bytes.Buffer, *bytes.Buffer, error) {
	outbuf := bytes.NewBuffer(make([]byte, 0, 4096))
	writePkgHeader(outbuf, f.Package)
	writeImportHeader(outbuf, "github.com/appneta/msgp/msgp")

	var testbuf *bytes.Buffer
	var testwr io.Writer
	if mode&gen.Test == gen.Test {
		testbuf = bytes.NewBuffer(make([]byte, 0, 4096))
		writePkgHeader(testbuf, f.Package)
		// Encode/Decode tests additionally exercise bytes round-trips.
		if mode&(gen.Encode|gen.Decode) != 0 {
			writeImportHeader(testbuf, "bytes", "github.com/appneta/msgp/msgp", "testing")
		} else {
			writeImportHeader(testbuf, "github.com/appneta/msgp/msgp", "testing")
		}
		testwr = testbuf
	}
	// PrintTo writes the generated methods into both buffers via the printer.
	return outbuf, testbuf, f.PrintTo(gen.NewPrinter(mode, outbuf, testwr))
}
func writePkgHeader(b *bytes.Buffer, name string) {
b.WriteString("package ")
b.WriteString(name)
b.WriteByte('\n')
b.WriteString("// NOTE: THIS FILE WAS PRODUCED BY THE\n// MSGP CODE GENERATION TOOL (github.com/appneta/msgp)\n// DO NOT EDIT\n\n")
}
func writeImportHeader(b *bytes.Buffer, imports ...string) {
b.WriteString("import (\n")
for _, im := range imports {
fmt.Fprintf(b, "\t%q\n", im)
}
b.WriteString(")\n\n")
}
Add "Code generated by msgp" to first line of generated files to trigger Github diff ignore behavior
package printer
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/appneta/msgp/gen"
"github.com/appneta/msgp/parse"
"github.com/ttacon/chalk"
"golang.org/x/tools/imports"
)
// infof prints a printf-style status message to stdout in magenta.
func infof(s string, v ...interface{}) {
	fmt.Printf(chalk.Magenta.Color(s), v...)
}
// PrintFile prints the methods for the provided list
// of elements to the given file name and canonical
// package path.
func PrintFile(file string, f *parse.FileSet, mode gen.Method) error {
	out, tests, err := generate(f, mode)
	if err != nil {
		return err
	}

	// we'll run goimports on the main file
	// in another goroutine, and run it here
	// for the test file. empirically, this
	// takes about the same amount of time as
	// doing them in serial when GOMAXPROCS=1,
	// and faster otherwise.
	res := goformat(file, out.Bytes())

	if tests != nil {
		testfile := strings.TrimSuffix(file, ".go") + "_test.go"
		err = format(testfile, tests.Bytes())
		if err != nil {
			return err
		}
		infof(">>> Wrote and formatted \"%s\"\n", testfile)
	}

	// Wait for the background goimports pass on the main file.
	err = <-res
	if err != nil {
		return err
	}
	return nil
}
// format runs goimports processing on data and writes the result to file
// with owner-only read/write permissions.
func format(file string, data []byte) error {
	out, err := imports.Process(file, data, nil)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(file, out, 0600)
}
// goformat runs format(file, data) in a background goroutine and returns a
// buffered channel that yields the result exactly once.
//
// BUG FIX: the old version printed the ">>> Wrote and formatted" success
// message unconditionally — even when format failed — and only after sending
// on the channel, so the caller could consume the error and return before the
// message (mis)reported success. The message is now printed only on success,
// before the result is sent.
func goformat(file string, data []byte) <-chan error {
	out := make(chan error, 1)
	go func(file string, data []byte, end chan error) {
		err := format(file, data)
		if err == nil {
			infof(">>> Wrote and formatted \"%s\"\n", file)
		}
		end <- err
	}(file, data, out)
	return out
}
// generate renders the code for f into a main buffer and, when the Test bit
// of mode is set, a companion test buffer (nil otherwise). Both buffers get
// a package header and the import block they will need.
func generate(f *parse.FileSet, mode gen.Method) (*bytes.Buffer, *bytes.Buffer, error) {
	outbuf := bytes.NewBuffer(make([]byte, 0, 4096))
	writePkgHeader(outbuf, f.Package)
	writeImportHeader(outbuf, "github.com/appneta/msgp/msgp")

	var testbuf *bytes.Buffer
	var testwr io.Writer
	if mode&gen.Test == gen.Test {
		testbuf = bytes.NewBuffer(make([]byte, 0, 4096))
		writePkgHeader(testbuf, f.Package)
		// Encode/Decode tests additionally exercise bytes round-trips.
		if mode&(gen.Encode|gen.Decode) != 0 {
			writeImportHeader(testbuf, "bytes", "github.com/appneta/msgp/msgp", "testing")
		} else {
			writeImportHeader(testbuf, "github.com/appneta/msgp/msgp", "testing")
		}
		testwr = testbuf
	}
	// PrintTo writes the generated methods into both buffers via the printer.
	return outbuf, testbuf, f.PrintTo(gen.NewPrinter(mode, outbuf, testwr))
}
func writePkgHeader(b *bytes.Buffer, name string) {
b.WriteString("// Code generated by msgp\n") // will be ignored by Github PR diffs
b.WriteString("package ")
b.WriteString(name)
b.WriteByte('\n')
b.WriteString("// NOTE: THIS FILE WAS PRODUCED BY THE\n// MSGP CODE GENERATION TOOL (github.com/appneta/msgp)\n// DO NOT EDIT\n\n")
}
func writeImportHeader(b *bytes.Buffer, imports ...string) {
b.WriteString("import (\n")
for _, im := range imports {
fmt.Fprintf(b, "\t%q\n", im)
}
b.WriteString(")\n\n")
}
|
package main
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"os/exec"
"time"
_ "github.com/lib/pq"
)
// Configuration is the JSON config loaded from ./conf.json.
type Configuration struct {
	DbURL                string `json:"DbURL"`           // postgres connection URL
	PgDataDir            string `json:"PG-data-dir"`     // data directory to archive
	PgBackupDir          string `json:"PG-backup-dir"`   // where backup zips are written
	PgArchiveDir         string `json:"PG-archive-dir"`  // WAL archive dir for pg_archivecleanup
	NumberOfBackups2Keep int    `json:"NumberOfBackups2Keep"`
}

var (
	// db is the shared database handle opened by connect2Database.
	db     *sql.DB
	config = Configuration{}
)
// main drives one base-backup cycle: read config, connect to the database,
// start the backup, archive the data directory, record it in backup_log,
// then prune old backups and WAL archives.
func main() {
	t := time.Now()
	sData := t.Format("20060102") // date stamp used in file names and labels

	var err error
	cfgFile := "./conf.json"
	err = config.readFromFile(cfgFile)
	if err != nil {
		// log.Fatal already exits with status 1; the old explicit os.Exit(1)
		// after it was unreachable and has been removed.
		log.Fatal(err)
	}

	err = connect2Database(config.DbURL)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = createBackupTables()
	if err != nil {
		log.Fatal(err)
	}

	bkFile, bkLabel, lastIndex, err := getBkFileName(sData)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("start backup with label \"%s\"\n", bkLabel)
	started, err := startBk(bkLabel) // renamed so the local no longer shadows startBk
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pg_start_backup: %s\n\n", started)

	// Archive the data directory; "jar" is used as the zip archiver here —
	// presumably for zip64 support on large clusters (TODO confirm).
	out, err := exec.Command("jar", "cvf", bkFile, config.PgDataDir).Output()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(out))

	archFile, err := finishBk()
	if err != nil {
		log.Fatal(err)
	}

	err = logBackup(bkFile, archFile, lastIndex)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("\n\ncleanup:\n")
	archFile2Keep, logID, err := getLastNeededArchFile(config.NumberOfBackups2Keep)
	if err != nil {
		// BUG FIX: this error was previously ignored, so cleanup could run
		// with a bogus archFile2Keep/logID pair and prune the wrong backups.
		log.Fatal(err)
	}
	err = deleteOldBackups(logID, archFile2Keep)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stop backup \"%s\"\n\n\n", bkLabel)
}
// readFromFile loads the JSON configuration at cfgFile into c.
//
// The previous os.Stat existence probe was redundant (and racy): os.Open
// already returns a *PathError wrapping os.ErrNotExist for a missing file.
// Decode now receives c directly instead of &c (a **Configuration), which
// json handled but obscured the intent.
func (c *Configuration) readFromFile(cfgFile string) error {
	file, err := os.Open(cfgFile)
	if err != nil {
		return err
	}
	defer file.Close()
	return json.NewDecoder(file).Decode(c)
}
// connect2Database opens the package-level db handle and verifies the
// connection with a ping. sql.Open alone does not dial, so the Ping is what
// actually proves connectivity.
func connect2Database(dbURL string) error {
	var err error
	db, err = sql.Open("postgres", dbURL)
	if err != nil {
		// err.Error() replaces the roundabout fmt.Sprintf("%s", err).
		return errors.New("Can't connect to the database, go error " + err.Error())
	}
	err = db.Ping()
	if err != nil {
		return errors.New("Can't ping the database, go error " + err.Error())
	}
	return nil
}
// createBackupTables ensures the backup_log bookkeeping table exists.
func createBackupTables() error {
	ddl := `
create table if not exists backup_log (
backup_log_id serial PRIMARY KEY,
backup_time timestamp DEFAULT statement_timestamp(),
backup_file varchar(256),
arch_file varchar(256),
last_file_index varchar(8)
)
`
	_, err := db.Exec(ddl)
	return err
}
// getBkFileName returns the next free backup file path, the matching backup
// label, and the numeric index used, for the given yyyymmdd date stamp.
func getBkFileName(sData string) (string, string, int, error) {
	var i int
	var bkFile string
	var bkLabel string

	// Start from the index recorded for today's most recent backup, if any.
	query := `
select CAST(last_file_index AS integer)
from backup_log
where backup_log_id = (
select max(backup_log_id)
from backup_log
where backup_time::date = to_date($1, 'yyyymmdd')
)
`
	err := db.QueryRow(query, sData).Scan(&i)
	switch {
	case err == sql.ErrNoRows:
		// No backup yet today: start counting at 0.
		i = 0
	case err != nil:
		return "", "", 0, err
	}

	// Probe the filesystem until a not-yet-existing file name is found.
	for {
		bkFile = fmt.Sprintf("%s/data_%s_%02d.zip", config.PgBackupDir, sData, i)
		bkLabel = fmt.Sprintf("BK %s %02d", sData, i)
		if _, err := os.Stat(bkFile); err == nil {
			i++
			continue
		} else {
			break
		}
	}
	return bkFile, bkLabel, i, nil
}
// startBk calls pg_start_backup with the given label and returns the
// backup's starting WAL location as text.
//
// The old switch had two cases (ErrNoRows and any other error) returning the
// identical result; QueryRow.Scan surfaces ErrNoRows like any other error,
// so a single check is equivalent and clearer.
func startBk(bkLabel string) (string, error) {
	var started string
	query := "SELECT pg_start_backup($1)::text"
	if err := db.QueryRow(query, bkLabel).Scan(&started); err != nil {
		return "", err
	}
	return started, nil
}
// finishBk calls pg_stop_backup and returns the name of the WAL file that
// contains the backup stop location (used later for archive cleanup).
//
// As in startBk, the two identical switch cases collapse to a single
// error check with no behavior change.
func finishBk() (string, error) {
	var archFile2Keep string
	query := "SELECT file_name from pg_xlogfile_name_offset(pg_stop_backup())"
	if err := db.QueryRow(query).Scan(&archFile2Keep); err != nil {
		return "", err
	}
	return archFile2Keep, nil
}
func logBackup(bkFile string, archFile string, lastFileIndex int) error {
query := `
insert into backup_log (
backup_file,
arch_file,
last_file_index
) values ($1, $2, $3)
`
sIndex := fmt.Sprintf("%02d", lastFileIndex)
_, err := db.Exec(query, bkFile, archFile, sIndex)
if err != nil {
return err
}
return nil
}
// getLastNeededArchFile returns the WAL archive file and backup_log_id of the
// oldest backup that must be kept (the nrBackups2Keep-th most recent row).
// When fewer than nrBackups2Keep backups exist it returns ("", -1, nil),
// which callers treat as "nothing to prune".
func getLastNeededArchFile(nrBackups2Keep int) (string, int, error) {
	var archFile string
	var logID int

	// Newest first, limited to the retention window; the loop below leaves
	// the LAST row of the result — i.e. the oldest row to keep — in the vars.
	query := `
select arch_file,
backup_log_id
from backup_log
order by backup_log_id desc
limit $1
`
	rows, err := db.Query(query, nrBackups2Keep)
	if err != nil {
		return "", 0, err
	}
	defer rows.Close()

	i := 0
	for rows.Next() {
		i++
		err = rows.Scan(&archFile, &logID)
		if err != nil {
			return "", 0, err
		}
	}
	if err := rows.Err(); err != nil {
		return "", 0, err
	}

	// Not enough history yet: signal "keep everything".
	if i < nrBackups2Keep {
		archFile = ""
		logID = -1
	}
	return archFile, logID, nil
}
// deleteOldBackups removes every backup file recorded with a backup_log_id
// older than logID, deletes their backup_log rows, and finally prunes WAL
// archives no longer needed by the retained backups via pg_archivecleanup.
// With logID == -1 (see getLastNeededArchFile) nothing matches and this is
// a no-op apart from the empty queries.
func deleteOldBackups(logID int, archFile2Keep string) error {
	var bkFile string

	query := `
select backup_file
from backup_log
where backup_log_id < $1
order by backup_log_id
`
	rows, err := db.Query(query, logID)
	if err != nil {
		return err
	}
	defer rows.Close()

	// Delete the backup files themselves first.
	for rows.Next() {
		err = rows.Scan(&bkFile)
		if err != nil {
			return err
		}
		log.Printf("Delete \"%s\"\n", bkFile)
		err = os.Remove(bkFile)
		if err != nil {
			return err
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}

	// Then drop the corresponding bookkeeping rows.
	query = "delete from backup_log where backup_log_id < $1"
	_, err = db.Exec(query, logID)
	if err != nil {
		return err
	}

	// Finally remove WAL archive files older than the oldest one still needed.
	if len(archFile2Keep) > 0 {
		log.Printf("pg_archivecleanup -d %s %s\n", config.PgArchiveDir, archFile2Keep)
		out, err := exec.Command("pg_archivecleanup", "-d", config.PgArchiveDir, archFile2Keep).Output()
		if err != nil {
			return err
		}
		fmt.Println(string(out))
	}
	return nil
}
Log to a dated file instead of stderr
package main
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"os/exec"
"time"
_ "github.com/lib/pq"
)
// Configuration is the JSON config loaded from ./conf.json.
type Configuration struct {
	DbURL                string `json:"DbURL"`           // postgres connection URL
	PgDataDir            string `json:"PG-data-dir"`     // data directory to archive
	PgBackupDir          string `json:"PG-backup-dir"`   // where backup zips are written
	PgArchiveDir         string `json:"PG-archive-dir"`  // WAL archive dir for pg_archivecleanup
	NumberOfBackups2Keep int    `json:"NumberOfBackups2Keep"`
}

var (
	// db is the shared database handle opened by connect2Database.
	db     *sql.DB
	config = Configuration{}
)
// main drives one base-backup cycle, logging to a per-day file: read config,
// connect, start the backup, archive the data directory, record it in
// backup_log, then prune old backups and WAL archives.
func main() {
	t := time.Now()
	sData := t.Format("20060102") // date stamp used in file names and labels

	var err error
	// Redirect the standard logger to a per-day log file (append mode).
	logFile, err := os.OpenFile(fmt.Sprintf("logs/backup_%s.txt", sData), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatal(err)
	}
	defer logFile.Close()
	log.SetOutput(logFile)

	cfgFile := "./conf.json"
	err = config.readFromFile(cfgFile)
	if err != nil {
		// log.Fatal already exits with status 1; the old explicit os.Exit(1)
		// after it was unreachable and has been removed.
		log.Fatal(err)
	}

	err = connect2Database(config.DbURL)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = createBackupTables()
	if err != nil {
		log.Fatal(err)
	}

	bkFile, bkLabel, lastIndex, err := getBkFileName(sData)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("start backup with label \"%s\"\n", bkLabel)
	started, err := startBk(bkLabel) // renamed so the local no longer shadows startBk
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pg_start_backup: %s\n\n", started)

	// Archive the data directory; "jar" is used as the zip archiver here —
	// presumably for zip64 support on large clusters (TODO confirm).
	out, err := exec.Command("jar", "cvf", bkFile, config.PgDataDir).Output()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(out))

	archFile, err := finishBk()
	if err != nil {
		log.Fatal(err)
	}

	err = logBackup(bkFile, archFile, lastIndex)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("\n\ncleanup:\n")
	archFile2Keep, logID, err := getLastNeededArchFile(config.NumberOfBackups2Keep)
	if err != nil {
		// BUG FIX: this error was previously ignored, so cleanup could run
		// with a bogus archFile2Keep/logID pair and prune the wrong backups.
		log.Fatal(err)
	}
	err = deleteOldBackups(logID, archFile2Keep)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stop backup \"%s\"\n\n\n", bkLabel)
}
// readFromFile loads the JSON configuration from cfgFile into c.
//
// The previous separate os.Stat existence pre-check was redundant and
// racy (TOCTOU): os.Open already returns an os.IsNotExist-compatible
// error for a missing file, so it has been removed.
func (c *Configuration) readFromFile(cfgFile string) error {
	file, err := os.Open(cfgFile)
	if err != nil {
		return err
	}
	defer file.Close()
	// Decode directly into the receiver; c is already a pointer, so the
	// former &c (a **Configuration) was unnecessary indirection.
	return json.NewDecoder(file).Decode(c)
}
// connect2Database opens the package-level DB handle and verifies
// connectivity with a ping (sql.Open alone does not dial).
func connect2Database(dbURL string) error {
	var err error
	db, err = sql.Open("postgres", dbURL)
	if err != nil {
		// fmt.Errorf replaces the former errors.New + fmt.Sprintf
		// concatenation; the message is unchanged in spirit.
		return fmt.Errorf("Can't connect to the database, go error %v", err)
	}
	if err = db.Ping(); err != nil {
		return fmt.Errorf("Can't ping the database, go error %v", err)
	}
	return nil
}
// createBackupTables ensures the backup_log bookkeeping table exists.
// It is safe to call on every run (CREATE TABLE IF NOT EXISTS).
func createBackupTables() error {
	ddl := `
create table if not exists backup_log (
backup_log_id serial PRIMARY KEY,
backup_time timestamp DEFAULT statement_timestamp(),
backup_file varchar(256),
arch_file varchar(256),
last_file_index varchar(8)
)
`
	_, err := db.Exec(ddl)
	return err
}
// getBkFileName chooses a free backup file name for the day sData.
// It returns the file path, the pg_start_backup label, and the day's
// backup index. The index starts from the last value recorded in
// backup_log for that day (0 when none) and is advanced past any
// backup file already present on disk.
func getBkFileName(sData string) (string, string, int, error) {
	var i int
	query := `
select CAST(last_file_index AS integer)
from backup_log
where backup_log_id = (
select max(backup_log_id)
from backup_log
where backup_time::date = to_date($1, 'yyyymmdd')
)
`
	err := db.QueryRow(query, sData).Scan(&i)
	switch {
	case err == sql.ErrNoRows:
		i = 0 // no backup recorded for this day yet
	case err != nil:
		return "", "", 0, err
	}
	// Probe for the first index whose backup file does not exist; the
	// former `if ... { continue } else { break }` shape is flattened.
	for {
		bkFile := fmt.Sprintf("%s/data_%s_%02d.zip", config.PgBackupDir, sData, i)
		bkLabel := fmt.Sprintf("BK %s %02d", sData, i)
		if _, err := os.Stat(bkFile); err != nil {
			// File absent (or stat failed): use this slot.
			return bkFile, bkLabel, i, nil
		}
		i++
	}
}
// startBk begins an online base backup by calling pg_start_backup with
// the given label and returns its textual result.
//
// The former switch treated sql.ErrNoRows separately but both arms
// returned the identical ("", err); a single error check suffices.
func startBk(bkLabel string) (string, error) {
	var res string
	query := "SELECT pg_start_backup($1)::text"
	if err := db.QueryRow(query, bkLabel).Scan(&res); err != nil {
		return "", err
	}
	return res, nil
}
// finishBk ends the backup via pg_stop_backup and returns the name of
// the last WAL file needed for this backup (fed to pg_archivecleanup).
func finishBk() (string, error) {
	var archFile string
	query := "SELECT file_name from pg_xlogfile_name_offset(pg_stop_backup())"
	if err := db.QueryRow(query).Scan(&archFile); err != nil {
		return "", err
	}
	return archFile, nil
}
// logBackup records one finished backup in backup_log: the archive
// file, the last WAL file needed, and the day's zero-padded index.
func logBackup(bkFile string, archFile string, lastFileIndex int) error {
	query := `
insert into backup_log (
backup_file,
arch_file,
last_file_index
) values ($1, $2, $3)
`
	_, err := db.Exec(query, bkFile, archFile, fmt.Sprintf("%02d", lastFileIndex))
	return err
}
// getLastNeededArchFile returns the arch_file and backup_log_id of the
// oldest backup that must be kept when retaining nrBackups2Keep
// backups. If fewer than nrBackups2Keep rows exist it returns
// ("", -1, nil), which callers treat as "nothing to clean up".
func getLastNeededArchFile(nrBackups2Keep int) (string, int, error) {
var archFile string
var logID int
query := `
select arch_file,
backup_log_id
from backup_log
order by backup_log_id desc
limit $1
`
rows, err := db.Query(query, nrBackups2Keep)
if err != nil {
return "", 0, err
}
defer rows.Close()
i := 0
// Walk the newest nrBackups2Keep rows; after the loop archFile/logID
// hold the values of the oldest row that is still retained.
for rows.Next() {
i++
err = rows.Scan(&archFile, &logID)
if err != nil {
return "", 0, err
}
}
if err := rows.Err(); err != nil {
return "", 0, err
}
// Not enough backups yet: signal "keep everything".
if i < nrBackups2Keep {
archFile = ""
logID = -1
}
return archFile, logID, nil
}
// deleteOldBackups removes backup files recorded with an id lower than
// logID, deletes their backup_log rows, and finally runs
// pg_archivecleanup to drop WAL files older than archFile2Keep.
// A logID of -1 (see getLastNeededArchFile) matches no rows.
func deleteOldBackups(logID int, archFile2Keep string) error {
	query := `
select backup_file
from backup_log
where backup_log_id < $1
order by backup_log_id
`
	rows, err := db.Query(query, logID)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var bkFile string
		if err := rows.Scan(&bkFile); err != nil {
			return err
		}
		log.Printf("Delete \"%s\"\n", bkFile)
		// Only remove files that still exist; a missing file is fine.
		if _, err := os.Stat(bkFile); err == nil {
			if err := os.Remove(bkFile); err != nil {
				return err
			}
		}
		// (A dead duplicate `if err != nil` check was removed here: err
		// had already been checked on every path above.)
	}
	if err := rows.Err(); err != nil {
		return err
	}
	query = "delete from backup_log where backup_log_id < $1"
	if _, err := db.Exec(query, logID); err != nil {
		return err
	}
	if len(archFile2Keep) > 0 {
		log.Printf("pg_archivecleanup -d %s %s\n", config.PgArchiveDir, archFile2Keep)
		out, err := exec.Command("pg_archivecleanup", "-d", config.PgArchiveDir, archFile2Keep).Output()
		if err != nil {
			return err
		}
		log.Println(string(out))
	}
	return nil
}
|
package aoc18
import (
"aoc18/problems/p01"
"aoc18/problems/p02"
"aoc18/problems/p03"
"aoc18/problems/p04"
"aoc18/problems/p05"
"aoc18/problems/p06"
"aoc18/problems/p07"
"aoc18/problems/p08"
"aoc18/problems/p09"
"aoc18/problems/p10"
"aoc18/problems/p11"
"aoc18/problems/p12"
"aoc18/problems/p13"
"runner"
)
// Run executes all implemented AOC 2018 puzzles against the input
// files under dataDir and checks each against its known answers.
func Run(dataDir string) {
// Note: Add new puzzles here.
// Each entry: display name, input path (relative to dataDir), solver,
// and the expected part-1 and part-2 answers ("n/a" when unsolved).
puzzles := []runner.Puzzle{
{"AOC 2018.01", "aoc/2018/day1.txt", p01.Solve, "416", "56752"},
{"AOC 2018.02", "aoc/2018/day2.txt", p02.Solve, "8715", "fvstwblgqkhpuixdrnevmaycd"},
{"AOC 2018.03", "aoc/2018/day3.txt", p03.Solve, "110546", "819"},
{"AOC 2018.04", "aoc/2018/day4.txt", p04.Solve, "4716", "117061"},
{"AOC 2018.05", "aoc/2018/day5.txt", p05.Solve, "9238", "4052"},
{"AOC 2018.06", "aoc/2018/day6.txt", p06.Solve, "3894", "39398"},
{"AOC 2018.07", "aoc/2018/day7.txt", p07.Solve, "IJLFUVDACEHGRZPNKQWSBTMXOY", "1072"},
{"AOC 2018.08", "aoc/2018/day8.txt", p08.Solve, "38567", "24453"},
{"AOC 2018.09", "aoc/2018/day9.txt", p09.Solve, "380705", "3171801582"},
{"AOC 2018.10", "aoc/2018/day10.txt", p10.Solve, "ZNNRZJXP", "10418"},
{"AOC 2018.11", "aoc/2018/day11.txt", p11.Solve, "21,77", "n/a"},
{"AOC 2018.12", "aoc/2018/day12.txt", p12.Solve, "3051", "1300000000669"},
{"AOC 2018.13", "aoc/2018/day13.txt", p13.Solve, "82,104", "121,22"},
// {"AOC 2018.14", "aoc/2018/day14.txt", p14.Solve, "n/a", "n/a"},
// {"AOC 2018.15", "aoc/2018/day15.txt", p15.Solve, "n/a", "n/a"},
// {"AOC 2018.16", "aoc/2018/day16.txt", p16.Solve, "n/a", "n/a"},
// {"AOC 2018.17", "aoc/2018/day17.txt", p17.Solve, "n/a", "n/a"},
// {"AOC 2018.18", "aoc/2018/day18.txt", p18.Solve, "n/a", "n/a"},
// {"AOC 2018.19", "aoc/2018/day19.txt", p19.Solve, "n/a", "n/a"},
// {"AOC 2018.20", "aoc/2018/day20.txt", p20.Solve, "n/a", "n/a"},
// {"AOC 2018.21", "aoc/2018/day21.txt", p21.Solve, "n/a", "n/a"},
// {"AOC 2018.22", "aoc/2018/day22.txt", p22.Solve, "n/a", "n/a"},
// {"AOC 2018.23", "aoc/2018/day23.txt", p23.Solve, "n/a", "n/a"},
// {"AOC 2018.24", "aoc/2018/day24.txt", p24.Solve, "n/a", "n/a"},
// {"AOC 2018.25", "aoc/2018/day25.txt", p25.Solve, "n/a", "n/a"},
}
runner.Run(dataDir, puzzles)
}
Added solution for AoC 2018 Day 11 Part 2
package aoc18
import (
"aoc18/problems/p01"
"aoc18/problems/p02"
"aoc18/problems/p03"
"aoc18/problems/p04"
"aoc18/problems/p05"
"aoc18/problems/p06"
"aoc18/problems/p07"
"aoc18/problems/p08"
"aoc18/problems/p09"
"aoc18/problems/p10"
"aoc18/problems/p11"
"aoc18/problems/p12"
"aoc18/problems/p13"
"runner"
)
// Run executes all implemented AOC 2018 puzzles against the input
// files under dataDir and checks each against its known answers.
func Run(dataDir string) {
// Note: Add new puzzles here.
// Each entry: display name, input path (relative to dataDir), solver,
// and the expected part-1 and part-2 answers ("n/a" when unsolved).
puzzles := []runner.Puzzle{
{"AOC 2018.01", "aoc/2018/day1.txt", p01.Solve, "416", "56752"},
{"AOC 2018.02", "aoc/2018/day2.txt", p02.Solve, "8715", "fvstwblgqkhpuixdrnevmaycd"},
{"AOC 2018.03", "aoc/2018/day3.txt", p03.Solve, "110546", "819"},
{"AOC 2018.04", "aoc/2018/day4.txt", p04.Solve, "4716", "117061"},
{"AOC 2018.05", "aoc/2018/day5.txt", p05.Solve, "9238", "4052"},
{"AOC 2018.06", "aoc/2018/day6.txt", p06.Solve, "3894", "39398"},
{"AOC 2018.07", "aoc/2018/day7.txt", p07.Solve, "IJLFUVDACEHGRZPNKQWSBTMXOY", "1072"},
{"AOC 2018.08", "aoc/2018/day8.txt", p08.Solve, "38567", "24453"},
{"AOC 2018.09", "aoc/2018/day9.txt", p09.Solve, "380705", "3171801582"},
{"AOC 2018.10", "aoc/2018/day10.txt", p10.Solve, "ZNNRZJXP", "10418"},
{"AOC 2018.11", "aoc/2018/day11.txt", p11.Solve, "21,77", "224,222,27"},
{"AOC 2018.12", "aoc/2018/day12.txt", p12.Solve, "3051", "1300000000669"},
{"AOC 2018.13", "aoc/2018/day13.txt", p13.Solve, "82,104", "121,22"},
// {"AOC 2018.14", "aoc/2018/day14.txt", p14.Solve, "n/a", "n/a"},
// {"AOC 2018.15", "aoc/2018/day15.txt", p15.Solve, "n/a", "n/a"},
// {"AOC 2018.16", "aoc/2018/day16.txt", p16.Solve, "n/a", "n/a"},
// {"AOC 2018.17", "aoc/2018/day17.txt", p17.Solve, "n/a", "n/a"},
// {"AOC 2018.18", "aoc/2018/day18.txt", p18.Solve, "n/a", "n/a"},
// {"AOC 2018.19", "aoc/2018/day19.txt", p19.Solve, "n/a", "n/a"},
// {"AOC 2018.20", "aoc/2018/day20.txt", p20.Solve, "n/a", "n/a"},
// {"AOC 2018.21", "aoc/2018/day21.txt", p21.Solve, "n/a", "n/a"},
// {"AOC 2018.22", "aoc/2018/day22.txt", p22.Solve, "n/a", "n/a"},
// {"AOC 2018.23", "aoc/2018/day23.txt", p23.Solve, "n/a", "n/a"},
// {"AOC 2018.24", "aoc/2018/day24.txt", p24.Solve, "n/a", "n/a"},
// {"AOC 2018.25", "aoc/2018/day25.txt", p25.Solve, "n/a", "n/a"},
}
runner.Run(dataDir, puzzles)
}
|
package fusetest
import (
"fmt"
"io"
"os"
"os/exec"
)
// KD is a simple struct for simplifying calling KD commands as a subprocess.
type KD struct {
// Streams wired to the spawned kd process.
Stdout io.Writer
Stderr io.Writer
Stdin io.Reader
}
// NewKD returns a KD whose streams are connected to this process's
// standard streams.
func NewKD() *KD {
return &KD{
Stdout: os.Stdout,
Stderr: os.Stderr,
Stdin: os.Stdin,
}
}
// run executes the kd binary with the given arguments, wiring the
// configured stdio streams so subprocess progress is visible.
func (kd *KD) run(args ...string) error {
	cmd := exec.Command("kd", args...)
	// Assign stdout/etc for visible progress.
	cmd.Stdout = kd.Stdout
	cmd.Stderr = kd.Stderr
	cmd.Stdin = kd.Stdin
	// cmd.Run already returns nil on success; the former
	// `if err := ...; err != nil { return err }; return nil` was
	// a redundant re-wrap.
	return cmd.Run()
}
// Mount mounts machine:remoteDir at localDir via `kd mount`.
func (kd *KD) Mount(machine, remoteDir, localDir string) error {
return kd.run("mount", fmt.Sprintf("%s:%s", machine, remoteDir), localDir)
}
// MountWithNoPrefetch mounts with metadata prefetching disabled.
func (kd *KD) MountWithNoPrefetch(machine, remoteDir, localDir string) error {
return kd.run(
"mount", "--noprefetch-meta", fmt.Sprintf("%s:%s", machine, remoteDir), localDir,
)
}
// MountWithPrefetchAll mounts with full content prefetching enabled.
func (kd *KD) MountWithPrefetchAll(machine, remoteDir, localDir string) error {
return kd.run(
"mount", "--prefetch-all", fmt.Sprintf("%s:%s", machine, remoteDir), localDir,
)
}
// Unmount unmounts the given machine via `kd unmount`.
func (kd *KD) Unmount(machine string) error {
return kd.run("unmount", machine)
}
fusetest: Dried up a join
package fusetest
import (
"io"
"os"
"os/exec"
"strings"
)
// KD is a simple struct for simplifying calling KD commands as a subprocess.
type KD struct {
// Streams wired to the spawned kd process.
Stdout io.Writer
Stderr io.Writer
Stdin io.Reader
}
// NewKD returns a KD whose streams are connected to this process's
// standard streams.
func NewKD() *KD {
return &KD{
Stdout: os.Stdout,
Stderr: os.Stderr,
Stdin: os.Stdin,
}
}
// run executes the kd binary with the given arguments, wiring the
// configured stdio streams so subprocess progress is visible.
func (kd *KD) run(args ...string) error {
cmd := exec.Command("kd", args...)
// Assign stdout/etc for visible progress.
cmd.Stdout = kd.Stdout
cmd.Stderr = kd.Stderr
cmd.Stdin = kd.Stdin
return cmd.Run()
}
// Mount mounts machine:remoteDir at localDir via `kd mount`.
func (kd *KD) Mount(machine, remoteDir, localDir string) error {
return kd.run("mount", joinWithColon(machine, remoteDir), localDir)
}
// MountWithNoPrefetch mounts with metadata prefetching disabled.
func (kd *KD) MountWithNoPrefetch(machine, remoteDir, localDir string) error {
return kd.run(
"mount", "--noprefetch-meta", joinWithColon(machine, remoteDir), localDir,
)
}
// MountWithPrefetchAll mounts with full content prefetching enabled.
func (kd *KD) MountWithPrefetchAll(machine, remoteDir, localDir string) error {
return kd.run(
"mount", "--prefetch-all", joinWithColon(machine, remoteDir), localDir,
)
}
// Unmount unmounts the given machine via `kd unmount`.
func (kd *KD) Unmount(machine string) error {
return kd.run("unmount", machine)
}
// joinWithColon concatenates its arguments with ":" separators,
// e.g. joinWithColon("machine", "dir") -> "machine:dir".
func joinWithColon(parts ...string) string {
	return strings.Join(parts, ":")
}
|
package main
import (
"encoding/json"
"encoding/xml"
"flag"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/qiniu/checkstyle"
)
var config = flag.String("config", "", "config json file")
var reporterOption = flag.String("reporter", "plain", "report output format, plain or xml")
var checker checkstyle.Checker
var reporter Reporter
// Ignore mirrors the "ignore" list in the JSON config: filepath.Match
// patterns for files/directories to skip.
type Ignore struct {
Files []string `json:"ignore"`
}
var ignore Ignore
// Reporter consumes per-file problems and renders a final report.
type Reporter interface {
ReceiveProblems(checker checkstyle.Checker, file string, problems []checkstyle.Problem)
Report()
}
// plainReporter collects problems and prints them as plain log lines.
type plainReporter struct {
normalProblems []*checkstyle.Problem
fatalProblems []*checkstyle.Problem
}
// printProblems logs each problem's position and description.
func (_ *plainReporter) printProblems(ps []*checkstyle.Problem) {
for _, p := range ps {
log.Printf("%v: %s\n", p.Position, p.Description)
}
}
// Report prints normal then fatal problems; any fatal problem makes
// the process exit with status 1.
func (p *plainReporter) Report() {
if len(p.normalProblems) != 0 {
log.Printf(" ========= There are %d normal problems ========= \n", len(p.normalProblems))
p.printProblems(p.normalProblems)
}
if len(p.fatalProblems) != 0 {
log.Printf(" ========= There are %d fatal problems ========= \n", len(p.fatalProblems))
p.printProblems(p.fatalProblems)
os.Exit(1)
}
if len(p.normalProblems) == 0 && len(p.fatalProblems) == 0 {
log.Println(" ========= There are no problems ========= ")
}
}
// ReceiveProblems buckets the file's problems into fatal and normal
// lists for later reporting.
func (p *plainReporter) ReceiveProblems(checker checkstyle.Checker, file string, problems []checkstyle.Problem) {
for i, problem := range problems {
// Appends &problems[i] (stable element address) rather than
// &problem, which is the reused per-iteration loop variable.
if checker.IsFatal(&problem) {
p.fatalProblems = append(p.fatalProblems, &problems[i])
} else {
p.normalProblems = append(p.normalProblems, &problems[i])
}
}
}
// xmlReporter accumulates problems per file and emits a
// checkstyle-compatible XML report on the log output.
type xmlReporter struct {
problems map[string][]checkstyle.Problem
hasFatal bool
}
// printProblems emits one <error> element per problem.
// NOTE(review): Description is interpolated without XML escaping; a
// description containing `"` or `<` would yield invalid XML.
func (x *xmlReporter) printProblems(ps []checkstyle.Problem) {
format := "\t\t<error line=\"%d\" column=\"%d\" severity=\"%s\" message=\"%s\" source=\"%s\" />\n"
for _, p := range ps {
severity := "warning"
if checker.IsFatal(&p) {
severity = "error"
x.hasFatal = true
}
log.Printf(format, p.Position.Line, p.Position.Column, severity, p.Description, p.Type)
}
}
// Report writes the full XML document (log timestamps disabled) and
// exits with status 1 if any fatal problem was seen.
func (x *xmlReporter) Report() {
log.SetFlags(0)
log.Print(xml.Header)
log.Println(`<checkstyle version="4.3">`)
for k, v := range x.problems {
if len(v) == 0 {
continue
}
log.Printf("\t<file name=\"%s\">\n", k)
x.printProblems(v)
log.Println("\t</file>")
}
log.Println("</checkstyle>")
if x.hasFatal {
os.Exit(1)
}
}
// ReceiveProblems stores the problems keyed by file name.
func (x *xmlReporter) ReceiveProblems(checker checkstyle.Checker, file string, problems []checkstyle.Problem) {
x.problems[file] = problems
}
// main parses flags, loads the checkstyle config, checks every file or
// directory named on the command line, and prints the final report.
func main() {
	flag.Parse()
	files := flag.Args()
	// Bug fix: flag.String never returns nil, so the former
	// `config == nil` check could never fire; test the value instead.
	if *config == "" {
		log.Fatalln("No config")
	}
	if *reporterOption == "xml" {
		reporter = &xmlReporter{problems: map[string][]checkstyle.Problem{}}
	} else {
		reporter = &plainReporter{}
	}
	conf, err := ioutil.ReadFile(*config)
	if err != nil {
		log.Fatalf("Open config %v fail %v\n", *config, err)
	}
	// Bug fix: the format string was missing a verb for err (go vet).
	if err = json.Unmarshal(conf, &ignore); err != nil {
		log.Fatalf("Parse config %v fail %v\n", *config, err)
	}
	checker, err = checkstyle.New(conf)
	if err != nil {
		log.Fatalf("New checker fail %v\n", err)
	}
	for _, v := range files {
		if isDir(v) {
			checkDir(v)
		} else {
			checkFile(v)
		}
	}
	reporter.Report()
}
func isDir(filename string) bool {
fi, err := os.Stat(filename)
return err == nil && fi.IsDir()
}
// checkFile runs the style checker over a single file and forwards any
// problems to the reporter. Read or parse failures are fatal.
func checkFile(fileName string) {
file, err := ioutil.ReadFile(fileName)
if err != nil {
log.Fatalf("Read File Fail %v %v\n", fileName, err)
}
ps, err := checker.Check(fileName, file)
if err != nil {
log.Fatalf("Parse File Fail %v %v\n", fileName, err)
}
reporter.ReceiveProblems(checker, fileName, ps)
}
// isIgnoreFile reports whether fileName matches any configured ignore
// pattern (filepath.Match syntax; pattern errors are ignored).
func isIgnoreFile(fileName string) bool {
for _, v := range ignore.Files {
if ok, _ := filepath.Match(v, fileName); ok {
return true
}
}
return false
}
// isIgnoreDir reports whether dir matches any configured ignore pattern.
func isIgnoreDir(dir string) bool {
for _, v := range ignore.Files {
if ok, _ := filepath.Match(v, dir); ok {
return true
}
}
return false
}
// checkDir walks dir, checking every non-ignored .go file and skipping
// ignored subtrees entirely.
func checkDir(dir string) {
if isIgnoreDir(dir) {
return
}
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err == nil && info.IsDir() && isIgnoreDir(path) {
return filepath.SkipDir
}
if err == nil && !info.IsDir() && strings.HasSuffix(path, ".go") && !isIgnoreFile(path) {
checkFile(path)
}
return err
})
}
add type prefix for xml report
package main
import (
"encoding/json"
"encoding/xml"
"flag"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/qiniu/checkstyle"
)
var config = flag.String("config", "", "config json file")
var reporterOption = flag.String("reporter", "plain", "report output format, plain or xml")
var checker checkstyle.Checker
var reporter Reporter
// Ignore mirrors the "ignore" list in the JSON config: filepath.Match
// patterns for files/directories to skip.
type Ignore struct {
Files []string `json:"ignore"`
}
var ignore Ignore
// Reporter consumes per-file problems and renders a final report.
type Reporter interface {
ReceiveProblems(checker checkstyle.Checker, file string, problems []checkstyle.Problem)
Report()
}
// plainReporter collects problems and prints them as plain log lines.
type plainReporter struct {
normalProblems []*checkstyle.Problem
fatalProblems []*checkstyle.Problem
}
// printProblems logs each problem's position and description.
func (_ *plainReporter) printProblems(ps []*checkstyle.Problem) {
for _, p := range ps {
log.Printf("%v: %s\n", p.Position, p.Description)
}
}
// Report prints normal then fatal problems; any fatal problem makes
// the process exit with status 1.
func (p *plainReporter) Report() {
if len(p.normalProblems) != 0 {
log.Printf(" ========= There are %d normal problems ========= \n", len(p.normalProblems))
p.printProblems(p.normalProblems)
}
if len(p.fatalProblems) != 0 {
log.Printf(" ========= There are %d fatal problems ========= \n", len(p.fatalProblems))
p.printProblems(p.fatalProblems)
os.Exit(1)
}
if len(p.normalProblems) == 0 && len(p.fatalProblems) == 0 {
log.Println(" ========= There are no problems ========= ")
}
}
// ReceiveProblems buckets the file's problems into fatal and normal
// lists for later reporting.
func (p *plainReporter) ReceiveProblems(checker checkstyle.Checker, file string, problems []checkstyle.Problem) {
for i, problem := range problems {
// Appends &problems[i] (stable element address) rather than
// &problem, which is the reused per-iteration loop variable.
if checker.IsFatal(&problem) {
p.fatalProblems = append(p.fatalProblems, &problems[i])
} else {
p.normalProblems = append(p.normalProblems, &problems[i])
}
}
}
// xmlReporter accumulates problems per file and emits a
// checkstyle-compatible XML report on the log output.
type xmlReporter struct {
problems map[string][]checkstyle.Problem
hasFatal bool
}
// printProblems emits one <error> element per problem, with the
// "checkstyle." prefix on the source attribute.
// NOTE(review): Description is interpolated without XML escaping; a
// description containing `"` or `<` would yield invalid XML.
func (x *xmlReporter) printProblems(ps []checkstyle.Problem) {
format := "\t\t<error line=\"%d\" column=\"%d\" severity=\"%s\" message=\"%s\" source=\"checkstyle.%s\" />\n"
for _, p := range ps {
severity := "warning"
if checker.IsFatal(&p) {
severity = "error"
x.hasFatal = true
}
log.Printf(format, p.Position.Line, p.Position.Column, severity, p.Description, p.Type)
}
}
// Report writes the full XML document (log timestamps disabled) and
// exits with status 1 if any fatal problem was seen.
func (x *xmlReporter) Report() {
log.SetFlags(0)
log.Print(xml.Header)
log.Println(`<checkstyle version="4.3">`)
for k, v := range x.problems {
if len(v) == 0 {
continue
}
log.Printf("\t<file name=\"%s\">\n", k)
x.printProblems(v)
log.Println("\t</file>")
}
log.Println("</checkstyle>")
if x.hasFatal {
os.Exit(1)
}
}
// ReceiveProblems stores the problems keyed by file name.
func (x *xmlReporter) ReceiveProblems(checker checkstyle.Checker, file string, problems []checkstyle.Problem) {
x.problems[file] = problems
}
// main parses flags, loads the checkstyle config, checks every file or
// directory named on the command line, and prints the final report.
func main() {
	flag.Parse()
	files := flag.Args()
	// Bug fix: flag.String never returns nil, so the former
	// `config == nil` check could never fire; test the value instead.
	if *config == "" {
		log.Fatalln("No config")
	}
	if *reporterOption == "xml" {
		reporter = &xmlReporter{problems: map[string][]checkstyle.Problem{}}
	} else {
		reporter = &plainReporter{}
	}
	conf, err := ioutil.ReadFile(*config)
	if err != nil {
		log.Fatalf("Open config %v fail %v\n", *config, err)
	}
	// Bug fix: the format string was missing a verb for err (go vet).
	if err = json.Unmarshal(conf, &ignore); err != nil {
		log.Fatalf("Parse config %v fail %v\n", *config, err)
	}
	checker, err = checkstyle.New(conf)
	if err != nil {
		log.Fatalf("New checker fail %v\n", err)
	}
	for _, v := range files {
		if isDir(v) {
			checkDir(v)
		} else {
			checkFile(v)
		}
	}
	reporter.Report()
}
func isDir(filename string) bool {
fi, err := os.Stat(filename)
return err == nil && fi.IsDir()
}
// checkFile runs the style checker over a single file and forwards any
// problems to the reporter. Read or parse failures are fatal.
func checkFile(fileName string) {
file, err := ioutil.ReadFile(fileName)
if err != nil {
log.Fatalf("Read File Fail %v %v\n", fileName, err)
}
ps, err := checker.Check(fileName, file)
if err != nil {
log.Fatalf("Parse File Fail %v %v\n", fileName, err)
}
reporter.ReceiveProblems(checker, fileName, ps)
}
// matchesAny reports whether name matches any of the given
// filepath.Match patterns. Pattern errors are treated as non-matches,
// preserving the original behavior of both callers.
func matchesAny(patterns []string, name string) bool {
	for _, p := range patterns {
		if ok, _ := filepath.Match(p, name); ok {
			return true
		}
	}
	return false
}
// isIgnoreFile reports whether fileName matches a configured ignore
// pattern. (Deduplicated: previously an exact copy of isIgnoreDir.)
func isIgnoreFile(fileName string) bool {
	return matchesAny(ignore.Files, fileName)
}
// isIgnoreDir reports whether dir matches a configured ignore pattern.
func isIgnoreDir(dir string) bool {
	return matchesAny(ignore.Files, dir)
}
// checkDir walks dir, checking every non-ignored .go file and skipping
// ignored subtrees entirely.
func checkDir(dir string) {
if isIgnoreDir(dir) {
return
}
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err == nil && info.IsDir() && isIgnoreDir(path) {
return filepath.SkipDir
}
if err == nil && !info.IsDir() && strings.HasSuffix(path, ".go") && !isIgnoreFile(path) {
checkFile(path)
}
return err
})
}
|
package main
import (
"bufio"
"crypto/sha256"
"fmt"
"io"
"os"
)
// main reads standard input to EOF and prints the hex-encoded SHA-256
// digest of everything read.
func main() {
	reader := bufio.NewReader(os.Stdin)
	buf := make([]byte, 1024)
	digest := sha256.New()
	for {
		// n replaces the former variable "len", which shadowed the builtin.
		n, err := reader.Read(buf)
		// Per the io.Reader contract, consume the n bytes read before
		// acting on err; the old code could exit without hashing a final
		// partial read.
		if n > 0 {
			digest.Write(buf[:n])
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "read error: %v\n", err)
			os.Exit(1)
		}
		if n == 0 {
			break // defensive: no data and no error
		}
	}
	fmt.Printf("%x\n", digest.Sum(nil))
}
Modified ch04/p02_sha256.
package main
import (
"bufio"
"crypto/sha256"
"crypto/sha512"
"flag"
"fmt"
"hash"
"io"
"os"
)
// main reads standard input to EOF and prints the hex-encoded SHA
// digest; the -l flag selects SHA-256 (default), SHA-384 or SHA-512.
func main() {
	var length int
	flag.IntVar(&length, "l", 256, "specify the length of sha. 256, 384, 512")
	flag.Parse()
	// A switch replaces the former if/else-if chain over length.
	var digest hash.Hash
	switch length {
	case 256:
		digest = sha256.New()
	case 384:
		digest = sha512.New384()
	case 512:
		digest = sha512.New()
	default:
		fmt.Fprintf(os.Stderr, "Unrecognized algorithm: %d\n", length)
		flag.PrintDefaults()
		os.Exit(1)
	}
	reader := bufio.NewReader(os.Stdin)
	buf := make([]byte, 1024)
	for {
		// n replaces the former variable "len", which shadowed the builtin.
		n, err := reader.Read(buf)
		// Consume the n bytes read before acting on err (io.Reader contract).
		if n > 0 {
			digest.Write(buf[:n])
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "read error: %v\n", err)
			os.Exit(1)
		}
		if n == 0 {
			break // defensive: no data and no error
		}
	}
	fmt.Printf("%x\n", digest.Sum(nil))
}
|
package main
import (
"io"
"log"
"net"
"os"
)
// main connects to a local TCP server on port 8000, copies the
// connection to stdout in the background, and streams stdin to the
// connection until EOF, then closes and waits for the reader.
func main() {
conn, err := net.Dial("tcp", "localhost:8000")
if err != nil {
log.Fatal(err)
}
done := make(chan struct{})
go func() {
// NOTE(review): io.Copy's error is discarded here; when main
// closes conn below, this copy ends with an error that is
// silently ignored.
io.Copy(os.Stdout, conn)
log.Println("done")
done <- struct{}{}
}()
mustCopy(conn, os.Stdin)
conn.Close()
<-done
}
func mustCopy(dst io.Writer, src io.Reader) {
if _, err := io.Copy(dst, src); err != nil {
log.Fatal(err)
}
}
[8.3] add solution with close write.
package main
import (
"io"
"log"
"net"
"os"
)
// main connects to a local TCP server on port 8000, streams stdin to
// it, then half-closes the write side so the server sees EOF while the
// background copy keeps draining the read side until it finishes.
func main() {
conn, err := net.Dial("tcp", "localhost:8000")
if err != nil {
log.Fatal(err)
}
done := make(chan struct{})
go func() {
mustCopy(os.Stdout, conn)
log.Println("done")
done <- struct{}{}
}()
mustCopy(conn, os.Stdin)
// CloseWrite sends FIN but leaves the read half open; this is
// TCP-specific, hence the *net.TCPConn type assertion.
if err := conn.(*net.TCPConn).CloseWrite(); err != nil {
log.Fatal(err)
}
<-done
}
func mustCopy(dst io.Writer, src io.Reader) {
if _, err := io.Copy(dst, src); err != nil {
log.Fatal(err)
}
}
|
package main
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode/utf8"
)
var debugEnabled bool
var pathListSep string
func printTo(w io.Writer, i ...interface{}) {
s := fmt.Sprint(i...)
fmt.Fprintln(w, strings.TrimSuffix(s, "\n"))
}
// Warn prints the arguments to stderr.
func Warn(i ...interface{}) {
printTo(os.Stderr, i...)
}
// Info prints the arguments to stdout.
func Info(i ...interface{}) {
printTo(os.Stdout, i...)
}
// Debug prints the arguments to stdout only when debug mode is enabled.
func Debug(i ...interface{}) {
if debugEnabled {
printTo(os.Stdout, i...)
}
}
// InitDebug enables debug output when GOACI_DEBUG is set (non-empty).
func InitDebug() {
if os.Getenv("GOACI_DEBUG") != "" {
debugEnabled = true
}
}
// ListSeparator returns filepath.ListSeparator rune as a string.
// The encoded value is memoized in pathListSep after the first call.
func ListSeparator() string {
	if pathListSep == "" {
		// n replaces the former variable "len", which shadowed the builtin.
		n := utf8.RuneLen(filepath.ListSeparator)
		if n < 0 {
			panic("filepath.ListSeparator is not valid utf8?!")
		}
		buf := make([]byte, n)
		n = utf8.EncodeRune(buf, filepath.ListSeparator)
		pathListSep = string(buf[:n])
	}
	return pathListSep
}
Put util code in proj2aci package
Also add a rather specific function for checking directory existence when the
directory name is not empty. It will be used in builders.
package proj2aci
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode/utf8"
)
var debugEnabled bool
var pathListSep string
// DirExists checks if directory exists if given path is not empty.
//
// This function is rather specific as it is mostly used for checking
// overrides validity (like overriding temporary directory, where
// empty string means "do not override").
func DirExists(path string) bool {
if path != "" {
fi, err := os.Stat(path)
if err != nil || !fi.IsDir() {
return false
}
}
return true
}
func printTo(w io.Writer, i ...interface{}) {
s := fmt.Sprint(i...)
fmt.Fprintln(w, strings.TrimSuffix(s, "\n"))
}
// Warn prints the arguments to stderr.
func Warn(i ...interface{}) {
printTo(os.Stderr, i...)
}
// Info prints the arguments to stdout.
func Info(i ...interface{}) {
printTo(os.Stdout, i...)
}
// Debug prints the arguments to stdout only when debug mode is enabled.
func Debug(i ...interface{}) {
if debugEnabled {
printTo(os.Stdout, i...)
}
}
// InitDebug enables debug output when GOACI_DEBUG is set (non-empty).
func InitDebug() {
if os.Getenv("GOACI_DEBUG") != "" {
debugEnabled = true
}
}
// listSeparator returns filepath.ListSeparator rune as a string.
// The encoded value is memoized in pathListSep after the first call.
func listSeparator() string {
	if pathListSep == "" {
		// n replaces the former variable "len", which shadowed the builtin.
		n := utf8.RuneLen(filepath.ListSeparator)
		if n < 0 {
			panic("filepath.ListSeparator is not valid utf8?!")
		}
		buf := make([]byte, n)
		n = utf8.EncodeRune(buf, filepath.ListSeparator)
		pathListSep = string(buf[:n])
	}
	return pathListSep
}
|
package client
import (
"errors"
"github.com/flynn/go-discover/discover"
"github.com/flynn/rpcplus"
"github.com/flynn/sampi/types"
)
// New discovers an online flynn-sampi server and returns an RPC client
// connected to the first advertised address.
func New() (*Client, error) {
disc, err := discover.NewClient()
if err != nil {
return nil, err
}
addrs := disc.Services("flynn-sampi").OnlineAddrs()
if len(addrs) == 0 {
return nil, errors.New("sampi: no servers found")
}
// NOTE(review): on dial failure this still returns a non-nil *Client
// alongside the error; callers must check err first.
c, err := rpcplus.DialHTTP("tcp", addrs[0])
return &Client{c}, err
}
// Client wraps the RPC connection to the sampi scheduler.
type Client struct {
c *rpcplus.Client
}
// State returns the scheduler's current host map.
func (c *Client) State() (map[string]sampi.Host, error) {
var state map[string]sampi.Host
err := c.c.Call("Scheduler.State", struct{}{}, &state)
return state, err
}
// Schedule submits a scheduling request and returns the response.
func (c *Client) Schedule(req *sampi.ScheduleReq) (*sampi.ScheduleRes, error) {
var res sampi.ScheduleRes
err := c.c.Call("Scheduler.Schedule", req, &res)
return &res, err
}
pkg/sampi: Update client for new discover API
package client
import (
"errors"
"github.com/flynn/go-discover/discover"
"github.com/flynn/rpcplus"
"github.com/flynn/sampi/types"
)
// New discovers an online flynn-sampi server and returns an RPC client
// connected to the first advertised address.
func New() (*Client, error) {
disc, err := discover.NewClient()
if err != nil {
return nil, err
}
// Services returns an error in the new discover API (previously the
// lookup was chained without an error check).
services, err := disc.Services("flynn-sampi")
if err != nil {
return nil, err
}
addrs := services.OnlineAddrs()
if len(addrs) == 0 {
return nil, errors.New("sampi: no servers found")
}
// NOTE(review): on dial failure this still returns a non-nil *Client
// alongside the error; callers must check err first.
c, err := rpcplus.DialHTTP("tcp", addrs[0])
return &Client{c}, err
}
// Client wraps the RPC connection to the sampi scheduler.
type Client struct {
c *rpcplus.Client
}
// State returns the scheduler's current host map.
func (c *Client) State() (map[string]sampi.Host, error) {
var state map[string]sampi.Host
err := c.c.Call("Scheduler.State", struct{}{}, &state)
return state, err
}
// Schedule submits a scheduling request and returns the response.
func (c *Client) Schedule(req *sampi.ScheduleReq) (*sampi.ScheduleRes, error) {
var res sampi.ScheduleRes
err := c.c.Call("Scheduler.Schedule", req, &res)
return &res, err
}
|
package virtualbox
import (
"archive/tar"
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/boot2docker/boot2docker-cli/driver"
flag "github.com/ogier/pflag"
)
// Flag is a bit set of VM feature toggles passed to VBoxManage.
type Flag int
// Flag names in lowercases to be consistent with VBoxManage options.
// Each bit corresponds to one VBoxManage --<name> on/off switch.
const (
F_acpi Flag = 1 << iota
F_ioapic
F_rtcuseutc
F_cpuhotplug
F_pae
F_longmode
F_synthcpu
F_hpet
F_hwvirtex
F_triplefaultreset
F_nestedpaging
F_largepages
F_vtxvpid
F_vtxux
F_accelerate3d
)
// DriverCfg carries the virtualbox driver's command-line settings.
type DriverCfg struct {
VBM string // Path to VBoxManage utility.
VMDK string // base VMDK to use as persistent disk.
shares shareSlice
// see also func ConfigFlags later in this file
}
var shareDefault string // set in ConfigFlags - this is what gets filled in for "shares" if it's empty
var (
verbose bool // Verbose mode (Local copy of B2D.Verbose).
cfg DriverCfg
)
// init registers the virtualbox driver and its flag hook with the
// driver registry; a registration failure aborts the process.
func init() {
if err := driver.Register("virtualbox", InitFunc); err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize driver. Error : %s", err.Error())
os.Exit(1)
}
if err := driver.RegisterConfig("virtualbox", ConfigFlags); err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize driver config. Error : %s", err.Error())
os.Exit(1)
}
}
// InitFunc initializes the Machine: it looks up mc.VM and, when the
// lookup fails during an init run, creates the machine instead.
func InitFunc(mc *driver.MachineConfig) (driver.Machine, error) {
verbose = mc.Verbose
m, err := GetMachine(mc.VM)
if err != nil && mc.Init {
return CreateMachine(mc)
}
return m, err
}
// shareSlice maps share name -> host directory for VirtualBox shared
// folders; it implements flag.Value so it can be set repeatedly.
type shareSlice map[string]string
// Separator between directory and share name in flag values.
const shareSliceSep = "="
// String renders the shares as "[dir=name ...]" for flag printing.
func (s shareSlice) String() string {
var ret []string
for name, dir := range s {
ret = append(ret, fmt.Sprintf("%s%s%s", dir, shareSliceSep, name))
}
return fmt.Sprintf("[%s]", strings.Join(ret, " "))
}
// Set parses a "dir=name" flag value (the name part is optional) and
// records the share, lazily allocating the map on first use.
func (s *shareSlice) Set(shareDir string) error {
var shareName string
if i := strings.Index(shareDir, shareSliceSep); i >= 0 {
shareName = shareDir[i+1:]
shareDir = shareDir[:i]
}
if shareName == "" {
// parts of the VBox internal code are buggy with share names that start with "/"
shareName = strings.TrimLeft(shareDir, "/")
// TODO do some basic Windows -> MSYS path conversion
// ie, s!^([a-z]+):[/\\]+!\1/!; s!\\!/!g
}
if *s == nil {
*s = shareSlice{}
}
(*s)[shareName] = shareDir
return nil
}
// ConfigFlags adds this driver's cmdline params to the flag set and
// fills cfg with platform-appropriate defaults.
func ConfigFlags(B2D *driver.MachineConfig, flags *flag.FlagSet) error {
//B2D.DriverCfg["virtualbox"] = cfg
flags.StringVar(&cfg.VMDK, "basevmdk", "", "Path to VMDK to use as base for persistent partition")
// Default VBoxManage location; on Windows, derive it from the VBox
// installer's environment variables when available.
cfg.VBM = "VBoxManage"
if runtime.GOOS == "windows" {
p := "C:\\Program Files\\Oracle\\VirtualBox"
if t := os.Getenv("VBOX_INSTALL_PATH"); t != "" {
p = t
} else if t = os.Getenv("VBOX_MSI_INSTALL_PATH"); t != "" {
p = t
}
cfg.VBM = filepath.Join(p, "VBoxManage.exe")
}
flags.StringVar(&cfg.VBM, "vbm", cfg.VBM, "path to VirtualBox management utility.")
// TODO once boot2docker improves, replace this all with homeDir() from config.go so we only share the current user's HOME by default
shareDefault = "disable"
switch runtime.GOOS {
case "darwin":
shareDefault = "/Users" + shareSliceSep + "Users"
case "windows":
shareDefault = "C:\\Users" + shareSliceSep + "c/Users"
}
var defaultText string
if shareDefault != "disable" {
defaultText = "(defaults to '" + shareDefault + "' if no shares are specified; use 'disable' to explicitly prevent any shares from being created) "
}
flags.Var(&cfg.shares, "vbox-share", fmt.Sprintf("%sList of directories to share during 'init' via VirtualBox Guest Additions, with optional labels", defaultText))
return nil
}
// bool2string maps true to "on" and false to "off", the syntax
// VBoxManage expects for feature switches.
func bool2string(b bool) string {
	if !b {
		return "off"
	}
	return "on"
}
// Get tests whether all bits of o are set in f and returns "on" or
// "off" for passing to VBoxManage.
func (f Flag) Get(o Flag) string {
return bool2string(f&o == o)
}
// Machine information.
// Fields mirror the `VBoxManage showvminfo --machinereadable` output
// parsed by GetMachine.
type Machine struct {
Name string
UUID string
State driver.MachineState
CPUs uint
Memory uint // main memory (in MB)
VRAM uint // video memory (in MB)
CfgFile string
BaseFolder string
OSType string
Flag Flag
BootOrder []string // max 4 slots, each in {none|floppy|dvd|disk|net}
DockerPort uint
SSHPort uint
SerialFile string
}
// Refresh reloads the machine information, preferring lookup by name
// and falling back to UUID.
func (m *Machine) Refresh() error {
id := m.Name
if id == "" {
id = m.UUID
}
mm, err := GetMachine(id)
if err != nil {
return err
}
// Overwrite the receiver wholesale with the freshly queried state.
*m = *mm
return nil
}
// Start starts the machine: it resumes a paused VM, or boots a
// powered-off/saved/aborted VM headlessly. If the machine is in some
// other state, it re-reads the state and reports an error unless the
// machine is running.
func (m *Machine) Start() error {
	switch m.State {
	case driver.Paused:
		return vbm("controlvm", m.Name, "resume")
	case driver.Poweroff, driver.Saved, driver.Aborted:
		return vbm("startvm", m.Name, "--type", "headless")
	}
	if err := m.Refresh(); err == nil {
		if m.State != driver.Running {
			// Bug fix: fmt.Errorf was called with no format verb for
			// m.Name (go vet: Errorf call has arguments but no formatting
			// directives).
			return fmt.Errorf("failed to start %s", m.Name)
		}
	}
	return nil
}
// Save suspends the machine and saves its state to disk. A paused
// machine is resumed first; an already-stopped machine is a no-op.
// (Comment fixed: it previously said "Suspend".)
func (m *Machine) Save() error {
switch m.State {
case driver.Paused:
if err := m.Start(); err != nil {
return err
}
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "savestate")
}
// Pause pauses the execution of the machine; no-op if it is not running.
func (m *Machine) Pause() error {
switch m.State {
case driver.Paused, driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "pause")
}
// Stop gracefully stops the machine via the ACPI power button.
// NOTE(review): the button press is retried every second until the VM
// reports Poweroff; there is no timeout, so an unresponsive guest
// makes this loop forever.
func (m *Machine) Stop() error {
switch m.State {
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
case driver.Paused:
if err := m.Start(); err != nil {
return err
}
}
for m.State != driver.Poweroff { // busy wait until the machine is stopped
if err := vbm("controlvm", m.Name, "acpipowerbutton"); err != nil {
return err
}
time.Sleep(1 * time.Second)
if err := m.Refresh(); err != nil {
return err
}
}
return nil
}
// Poweroff forcefully stops the machine. State is lost and might corrupt the disk image.
func (m *Machine) Poweroff() error {
switch m.State {
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "poweroff")
}
// Restart gracefully restarts the machine (start if needed, stop, start).
func (m *Machine) Restart() error {
switch m.State {
case driver.Paused, driver.Saved:
if err := m.Start(); err != nil {
return err
}
}
if err := m.Stop(); err != nil {
return err
}
return m.Start()
}
// Reset forcefully restarts the machine. State is lost and might corrupt the disk image.
func (m *Machine) Reset() error {
switch m.State {
case driver.Paused, driver.Saved:
if err := m.Start(); err != nil {
return err
}
}
return vbm("controlvm", m.Name, "reset")
}
// Delete deletes the machine and associated disk images, powering it
// off first if necessary.
func (m *Machine) Delete() error {
if err := m.Poweroff(); err != nil {
return err
}
return vbm("unregistervm", m.Name, "--delete")
}
// GetName returns the machine name.
// (Comment fixed: it previously said "Get current state".)
func (m *Machine) GetName() string {
return m.Name
}
// GetState returns the current machine state.
func (m *Machine) GetState() driver.MachineState {
return m.State
}
// GetSerialFile returns the serial console socket/file path.
func (m *Machine) GetSerialFile() string {
return m.SerialFile
}
// GetDockerPort returns the forwarded host port for the Docker daemon.
func (m *Machine) GetDockerPort() uint {
return m.DockerPort
}
// GetSSHPort returns the forwarded host port for SSH.
func (m *Machine) GetSSHPort() uint {
return m.SSHPort
}
// GetMachine finds a machine by its name or UUID and populates a
// Machine from the `showvminfo --machinereadable` key=value output.
// It returns ErrMachineNotExist when VBoxManage reports an unknown
// machine, and passes through any other command or parse error.
func GetMachine(id string) (*Machine, error) {
	stdout, stderr, err := vbmOutErr("showvminfo", id, "--machinereadable")
	if err != nil {
		if reMachineNotFound.FindString(stderr) != "" {
			return nil, ErrMachineNotExist
		}
		return nil, err
	}
	s := bufio.NewScanner(strings.NewReader(stdout))
	m := &Machine{}
	for s.Scan() {
		res := reVMInfoLine.FindStringSubmatch(s.Text())
		if res == nil {
			continue
		}
		// The key is in capture group 1 or 2 and the value in group 3
		// or 4 — presumably the quoted vs. unquoted alternatives of
		// reVMInfoLine; confirm against its definition.
		key := res[1]
		if key == "" {
			key = res[2]
		}
		val := res[3]
		if val == "" {
			val = res[4]
		}
		switch key {
		case "name":
			m.Name = val
		case "UUID":
			m.UUID = val
		case "VMState":
			m.State = driver.MachineState(val)
		case "memory":
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return nil, err
			}
			m.Memory = uint(n)
		case "cpus":
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return nil, err
			}
			m.CPUs = uint(n)
		case "vram":
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return nil, err
			}
			m.VRAM = uint(n)
		case "CfgFile":
			m.CfgFile = val
			m.BaseFolder = filepath.Dir(val)
		case "uartmode1":
			// uartmode1="server,/home/sven/.boot2docker/boot2docker-vm.sock"
			vals := strings.Split(val, ",")
			if len(vals) >= 2 {
				m.SerialFile = vals[1]
			}
		default:
			if strings.HasPrefix(key, "Forwarding(") {
				// "Forwarding(\d*)" are ordered by the name inside the val, not fixed order.
				// Forwarding(0)="docker,tcp,127.0.0.1,5555,,"
				// Forwarding(1)="ssh,tcp,127.0.0.1,2222,,"
				vals := strings.Split(val, ",")
				// FIX: the original indexed vals[3] unconditionally and
				// panicked on a rule with fewer than four fields; skip
				// malformed rules instead.
				if len(vals) < 4 {
					continue
				}
				n, err := strconv.ParseUint(vals[3], 10, 32)
				if err != nil {
					return nil, err
				}
				switch vals[0] {
				case "docker":
					m.DockerPort = uint(n)
				case "ssh":
					m.SSHPort = uint(n)
				}
			}
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return m, nil
}
// ListMachines lists the names of all machines registered with
// VirtualBox, as reported by `VBoxManage list vms`. Lines that do not
// match reVMNameUUID are skipped.
func ListMachines() ([]string, error) {
	out, err := vbmOut("list", "vms")
	if err != nil {
		return nil, err
	}
	ms := []string{}
	s := bufio.NewScanner(strings.NewReader(out))
	for s.Scan() {
		// reVMNameUUID presumably captures the VM name in group 1 —
		// confirm against its definition elsewhere in this package.
		res := reVMNameUUID.FindStringSubmatch(s.Text())
		if res == nil {
			continue
		}
		ms = append(ms, res[1])
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return ms, nil
}
// CreateMachine creates a new machine. If basefolder is empty, use default.
func CreateMachine(mc *driver.MachineConfig) (*Machine, error) {
if mc.VM == "" {
return nil, fmt.Errorf("machine name is empty")
}
// Check if a machine with the given name already exists.
machineNames, err := ListMachines()
if err != nil {
return nil, err
}
for _, m := range machineNames {
if m == mc.VM {
return nil, ErrMachineExist
}
}
// Create and register the machine.
args := []string{"createvm", "--name", mc.VM, "--register"}
if err := vbm(args...); err != nil {
return nil, err
}
m, err := GetMachine(mc.VM)
if err != nil {
return nil, err
}
// Configure VM for Boot2docker
SetExtra(mc.VM, "VBoxInternal/CPUM/EnableHVP", "1")
m.OSType = "Linux26_64"
m.CPUs = uint(runtime.NumCPU())
if m.CPUs > 32 {
m.CPUs = 32
}
m.Memory = mc.Memory
m.SerialFile = mc.SerialFile
m.Flag |= F_pae
m.Flag |= F_longmode // important: use x86-64 processor
m.Flag |= F_rtcuseutc
m.Flag |= F_acpi
m.Flag |= F_ioapic
m.Flag |= F_hpet
m.Flag |= F_hwvirtex
m.Flag |= F_vtxvpid
m.Flag |= F_largepages
m.Flag |= F_nestedpaging
// Set VM boot order
m.BootOrder = []string{"dvd"}
if err := m.Modify(); err != nil {
return m, err
}
// Set NIC #1 to use NAT
m.SetNIC(1, driver.NIC{Network: driver.NICNetNAT, Hardware: driver.VirtIO})
pfRules := map[string]driver.PFRule{
"ssh": {Proto: driver.PFTCP, HostIP: net.ParseIP("127.0.0.1"), HostPort: mc.SSHPort, GuestPort: driver.SSHPort},
}
if mc.DockerPort > 0 {
pfRules["docker"] = driver.PFRule{Proto: driver.PFTCP, HostIP: net.ParseIP("127.0.0.1"), HostPort: mc.DockerPort, GuestPort: driver.DockerPort}
}
for name, rule := range pfRules {
if err := m.AddNATPF(1, name, rule); err != nil {
return m, err
}
}
hostIFName, err := getHostOnlyNetworkInterface(mc)
if err != nil {
return m, err
}
// Set NIC #2 to use host-only
if err := m.SetNIC(2, driver.NIC{Network: driver.NICNetHostonly, Hardware: driver.VirtIO, HostonlyAdapter: hostIFName}); err != nil {
return m, err
}
// Set VM storage
if err := m.AddStorageCtl("SATA", driver.StorageController{SysBus: driver.SysBusSATA, HostIOCache: true, Bootable: true}); err != nil {
return m, err
}
// Attach ISO image
if err := m.AttachStorage("SATA", driver.StorageMedium{Port: 0, Device: 0, DriveType: driver.DriveDVD, Medium: mc.ISO}); err != nil {
return m, err
}
diskImg := filepath.Join(m.BaseFolder, fmt.Sprintf("%s.vmdk", mc.VM))
if _, err := os.Stat(diskImg); err != nil {
if !os.IsNotExist(err) {
return m, err
}
if cfg.VMDK != "" {
if err := copyDiskImage(diskImg, cfg.VMDK); err != nil {
return m, err
}
} else {
magicString := "boot2docker, please format-me"
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
// magicString first so the automount script knows to format the disk
file := &tar.Header{Name: magicString, Size: int64(len(magicString))}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
if _, err := tw.Write([]byte(magicString)); err != nil {
return m, err
}
// .ssh/key.pub => authorized_keys
file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
pubKey, err := ioutil.ReadFile(mc.SSHKey + ".pub")
if err != nil {
return m, err
}
file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
if _, err := tw.Write([]byte(pubKey)); err != nil {
return m, err
}
file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644}
if err := tw.WriteHeader(file); err != nil {
return m, err
}
if _, err := tw.Write([]byte(pubKey)); err != nil {
return m, err
}
if err := tw.Close(); err != nil {
return m, err
}
if err := makeDiskImage(diskImg, mc.DiskSize, buf.Bytes()); err != nil {
return m, err
}
if verbose {
fmt.Println("Initializing disk with ssh keys")
fmt.Printf("WRITING: %s\n-----\n", buf)
}
}
}
if err := m.AttachStorage("SATA", driver.StorageMedium{Port: 1, Device: 0, DriveType: driver.DriveHDD, Medium: diskImg}); err != nil {
return m, err
}
// let VBoxService do nice magic automounting (when it's used)
if err := vbm("guestproperty", "set", mc.VM, "/VirtualBox/GuestAdd/SharedFolders/MountPrefix", "/"); err != nil {
return nil, err
}
if err := vbm("guestproperty", "set", mc.VM, "/VirtualBox/GuestAdd/SharedFolders/MountDir", "/"); err != nil {
return nil, err
}
// set up some shared folders as appropriate
if len(cfg.shares) == 0 {
cfg.shares.Set(shareDefault)
}
for shareName, shareDir := range cfg.shares {
if shareDir == "disable" {
continue
}
if _, err := os.Stat(shareDir); err != nil {
return nil, err
}
// woo, shareDir exists! let's carry on!
if err := vbm("sharedfolder", "add", mc.VM, "--name", shareName, "--hostpath", shareDir, "--automount"); err != nil {
return nil, err
}
// enable symlinks
if err := vbm("setextradata", mc.VM, "VBoxInternal2/SharedFoldersEnableSymlinksCreate/"+shareName, "1"); err != nil {
return nil, err
}
}
return m, nil
}
// Modify changes the settings of the machine.
func (m *Machine) Modify() error {
args := []string{"modifyvm", m.Name,
"--firmware", "bios",
"--bioslogofadein", "off",
"--bioslogofadeout", "off",
"--natdnshostresolver1", "on",
"--bioslogodisplaytime", "0",
"--biosbootmenu", "disabled",
"--ostype", m.OSType,
"--cpus", fmt.Sprintf("%d", m.CPUs),
"--memory", fmt.Sprintf("%d", m.Memory),
"--vram", fmt.Sprintf("%d", m.VRAM),
"--acpi", m.Flag.Get(F_acpi),
"--ioapic", m.Flag.Get(F_ioapic),
"--rtcuseutc", m.Flag.Get(F_rtcuseutc),
"--cpuhotplug", m.Flag.Get(F_cpuhotplug),
"--pae", m.Flag.Get(F_pae),
"--longmode", m.Flag.Get(F_longmode),
"--synthcpu", m.Flag.Get(F_synthcpu),
"--hpet", m.Flag.Get(F_hpet),
"--hwvirtex", m.Flag.Get(F_hwvirtex),
"--triplefaultreset", m.Flag.Get(F_triplefaultreset),
"--nestedpaging", m.Flag.Get(F_nestedpaging),
"--largepages", m.Flag.Get(F_largepages),
"--vtxvpid", m.Flag.Get(F_vtxvpid),
"--vtxux", m.Flag.Get(F_vtxux),
"--accelerate3d", m.Flag.Get(F_accelerate3d),
}
//if runtime.GOOS != "windows" {
args = append(args,
"--uart1", "0x3F8", "4",
"--uartmode1", "server", m.SerialFile,
)
//}
for i, dev := range m.BootOrder {
if i > 3 {
break // Only four slots `--boot{1,2,3,4}`. Ignore the rest.
}
args = append(args, fmt.Sprintf("--boot%d", i+1), dev)
}
if err := vbm(args...); err != nil {
return err
}
return m.Refresh()
}
// AddNATPF adds a NAT port forwarding rule to the n-th NIC with the
// given name, formatted per `controlvm natpf<n>` ("name,rule").
func (m *Machine) AddNATPF(n int, name string, rule driver.PFRule) error {
	return vbm("controlvm", m.Name, fmt.Sprintf("natpf%d", n),
		fmt.Sprintf("%s,%s", name, rule.Format()))
}
// DelNATPF deletes the NAT port forwarding rule with the given name
// from the n-th NIC.
func (m *Machine) DelNATPF(n int, name string) error {
	return vbm("controlvm", m.Name, fmt.Sprintf("natpf%d", n), "delete", name)
}
// SetNIC configures the n-th network interface: network type, emulated
// hardware, and — for host-only networks — the host adapter to bind.
func (m *Machine) SetNIC(n int, nic driver.NIC) error {
	args := []string{"modifyvm", m.Name,
		fmt.Sprintf("--nic%d", n), string(nic.Network),
		fmt.Sprintf("--nictype%d", n), string(nic.Hardware),
		fmt.Sprintf("--cableconnected%d", n), "on",
	}
	// Compare against the driver constant rather than the bare string
	// "hostonly", matching how callers (CreateMachine) specify the
	// network and preventing drift from the constant's value.
	if nic.Network == driver.NICNetHostonly {
		args = append(args, fmt.Sprintf("--hostonlyadapter%d", n), nic.HostonlyAdapter)
	}
	return vbm(args...)
}
// AddStorageCtl adds a storage controller with the given name. Bus
// type, port count, and chipset are passed only when set; the host
// I/O cache and bootable options are always sent.
func (m *Machine) AddStorageCtl(name string, ctl driver.StorageController) error {
	args := []string{"storagectl", m.Name, "--name", name}
	if ctl.SysBus != "" {
		args = append(args, "--add", string(ctl.SysBus))
	}
	if ctl.Ports > 0 {
		args = append(args, "--portcount", fmt.Sprintf("%d", ctl.Ports))
	}
	if ctl.Chipset != "" {
		args = append(args, "--controller", string(ctl.Chipset))
	}
	args = append(args, "--hostiocache", bool2string(ctl.HostIOCache))
	args = append(args, "--bootable", bool2string(ctl.Bootable))
	return vbm(args...)
}
// DelStorageCtl removes the storage controller with the given name
// from the machine.
func (m *Machine) DelStorageCtl(name string) error {
	return vbm("storagectl", m.Name, "--name", name, "--remove")
}
// AttachStorage attaches a storage medium (e.g. an ISO or disk image
// path) at the given port/device of the named storage controller.
func (m *Machine) AttachStorage(ctlName string, medium driver.StorageMedium) error {
	return vbm("storageattach", m.Name, "--storagectl", ctlName,
		"--port", fmt.Sprintf("%d", medium.Port),
		"--device", fmt.Sprintf("%d", medium.Device),
		"--type", string(medium.DriveType),
		"--medium", medium.Medium,
	)
}
Set up VirtualBox shares on start
Previously they were set up on create, but this meant that they
would not be created when boot2docker was upgraded.
Signed-off-by: Ben Firshman <73675debcd8a436be48ec22211dcf44fe0df0a64@firshman.co.uk>
package virtualbox
import (
"archive/tar"
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/boot2docker/boot2docker-cli/driver"
flag "github.com/ogier/pflag"
)
type Flag int
// Flag names in lowercases to be consistent with VBoxManage options.
const (
F_acpi Flag = 1 << iota
F_ioapic
F_rtcuseutc
F_cpuhotplug
F_pae
F_longmode
F_synthcpu
F_hpet
F_hwvirtex
F_triplefaultreset
F_nestedpaging
F_largepages
F_vtxvpid
F_vtxux
F_accelerate3d
)
type DriverCfg struct {
VBM string // Path to VBoxManage utility.
VMDK string // base VMDK to use as persistent disk.
shares shareSlice
// see also func ConfigFlags later in this file
}
var shareDefault string // set in ConfigFlags - this is what gets filled in for "shares" if it's empty
var (
verbose bool // Verbose mode (Local copy of B2D.Verbose).
cfg DriverCfg
)
// init registers the virtualbox driver and its flag configuration with
// the generic driver registry; either registration failing is fatal.
func init() {
	if err := driver.Register("virtualbox", InitFunc); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to initialize driver. Error : %s", err.Error())
		os.Exit(1)
	}
	if err := driver.RegisterConfig("virtualbox", ConfigFlags); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to initialize driver config. Error : %s", err.Error())
		os.Exit(1)
	}
}
// InitFunc is the driver entry point registered in init. It looks up
// the configured VM and, when the lookup fails and mc.Init is set,
// creates the machine instead. Note: any lookup error (not only
// "machine does not exist") triggers creation when mc.Init is true.
func InitFunc(mc *driver.MachineConfig) (driver.Machine, error) {
	verbose = mc.Verbose
	m, err := GetMachine(mc.VM)
	if err != nil && mc.Init {
		return CreateMachine(mc)
	}
	return m, err
}
type shareSlice map[string]string
const shareSliceSep = "="
// String renders the share set as "[dir=name dir=name ...]" for flag
// help/display. Go map iteration order is randomized, so entry order
// varies between calls.
func (s shareSlice) String() string {
	var ret []string
	for name, dir := range s {
		ret = append(ret, fmt.Sprintf("%s%s%s", dir, shareSliceSep, name))
	}
	return fmt.Sprintf("[%s]", strings.Join(ret, " "))
}
// Set parses a "dir=name" share specification and records it in the
// map, allocating the map on first use. Without an explicit "=name"
// the share name defaults to the directory path with leading slashes
// stripped. Always returns nil.
func (s *shareSlice) Set(shareDir string) error {
	var shareName string
	if i := strings.Index(shareDir, shareSliceSep); i >= 0 {
		shareName = shareDir[i+1:]
		shareDir = shareDir[:i]
	}
	if shareName == "" {
		// parts of the VBox internal code are buggy with share names that start with "/"
		shareName = strings.TrimLeft(shareDir, "/")
		// TODO do some basic Windows -> MSYS path conversion
		// ie, s!^([a-z]+):[/\\]+!\1/!; s!\\!/!g
	}
	if *s == nil {
		*s = shareSlice{}
	}
	(*s)[shareName] = shareDir
	return nil
}
// ConfigFlags registers this driver's command-line flags and computes
// platform defaults: the VBoxManage path (honoring VBOX_INSTALL_PATH /
// VBOX_MSI_INSTALL_PATH on Windows) and the default shared folder
// (/Users on darwin, C:\Users on windows, "disable" elsewhere).
func ConfigFlags(B2D *driver.MachineConfig, flags *flag.FlagSet) error {
	//B2D.DriverCfg["virtualbox"] = cfg
	flags.StringVar(&cfg.VMDK, "basevmdk", "", "Path to VMDK to use as base for persistent partition")
	cfg.VBM = "VBoxManage"
	if runtime.GOOS == "windows" {
		p := "C:\\Program Files\\Oracle\\VirtualBox"
		if t := os.Getenv("VBOX_INSTALL_PATH"); t != "" {
			p = t
		} else if t = os.Getenv("VBOX_MSI_INSTALL_PATH"); t != "" {
			p = t
		}
		cfg.VBM = filepath.Join(p, "VBoxManage.exe")
	}
	flags.StringVar(&cfg.VBM, "vbm", cfg.VBM, "path to VirtualBox management utility.")
	// TODO once boot2docker improves, replace this all with homeDir() from config.go so we only share the current user's HOME by default
	shareDefault = "disable"
	switch runtime.GOOS {
	case "darwin":
		shareDefault = "/Users" + shareSliceSep + "Users"
	case "windows":
		shareDefault = "C:\\Users" + shareSliceSep + "c/Users"
	}
	return nil
}
// bool2string converts a boolean into the "on"/"off" form that
// VBoxManage options expect.
func bool2string(b bool) string {
	result := "off"
	if b {
		result = "on"
	}
	return result
}
// Get reports whether all bits of o are set in f, rendered as
// "on"/"off" for direct use as a VBoxManage option value.
func (f Flag) Get(o Flag) string {
	return bool2string(f&o == o)
}
// Machine information.
//
// Field values mirror the `showvminfo --machinereadable` output parsed
// by GetMachine; mutating them has no effect until Modify is called.
type Machine struct {
	Name string
	UUID string
	State driver.MachineState
	CPUs uint
	Memory uint // main memory (in MB)
	VRAM uint // video memory (in MB)
	CfgFile string
	BaseFolder string // directory containing CfgFile
	OSType string
	Flag Flag
	BootOrder []string // max 4 slots, each in {none|floppy|dvd|disk|net}
	DockerPort uint // host port of the "docker" NAT forwarding rule
	SSHPort uint // host port of the "ssh" NAT forwarding rule
	SerialFile string // host path of the serial console socket/file
}
// Refresh reloads the machine information from VirtualBox, looking the
// VM up by name (or by UUID when the name is empty) and overwriting
// the receiver in place on success.
func (m *Machine) Refresh() error {
	id := m.Name
	if id == "" {
		id = m.UUID
	}
	mm, err := GetMachine(id)
	if err != nil {
		return err
	}
	*m = *mm
	return nil
}
// Start starts the machine: paused machines are resumed, and powered
// off/saved/aborted machines are booted headless after (re-)creating
// the shared folders, so upgraded installations pick up new shares.
// For any other state it refreshes and errors if the machine is not
// actually running.
func (m *Machine) Start() error {
	switch m.State {
	case driver.Paused:
		return vbm("controlvm", m.Name, "resume")
	case driver.Poweroff, driver.Saved, driver.Aborted:
		if err := m.setUpShares(); err != nil {
			return err
		}
		return vbm("startvm", m.Name, "--type", "headless")
	}
	// NOTE(review): a Refresh failure is deliberately swallowed here
	// and treated as success — confirm that is intended.
	if err := m.Refresh(); err == nil {
		if m.State != driver.Running {
			// FIX: the original `fmt.Errorf("Failed to start", m.Name)`
			// had an argument but no format verb (flagged by go vet).
			return fmt.Errorf("failed to start %s", m.Name)
		}
	}
	return nil
}
// Save suspends the machine and saves its state to disk. A paused
// machine is resumed first — presumably so savestate operates on a
// running VM; confirm against VBoxManage docs. Machines already
// off/aborted/saved are left untouched. (Comment previously said
// "Suspend", but the method is named Save.)
func (m *Machine) Save() error {
	switch m.State {
	case driver.Paused:
		if err := m.Start(); err != nil {
			return err
		}
	case driver.Poweroff, driver.Aborted, driver.Saved:
		return nil
	}
	return vbm("controlvm", m.Name, "savestate")
}
// Pause pauses the execution of the machine.
func (m *Machine) Pause() error {
switch m.State {
case driver.Paused, driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "pause")
}
// Stop gracefully stops the machine.
func (m *Machine) Stop() error {
switch m.State {
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
case driver.Paused:
if err := m.Start(); err != nil {
return err
}
}
for m.State != driver.Poweroff { // busy wait until the machine is stopped
if err := vbm("controlvm", m.Name, "acpipowerbutton"); err != nil {
return err
}
time.Sleep(1 * time.Second)
if err := m.Refresh(); err != nil {
return err
}
}
return nil
}
// Poweroff forcefully stops the machine. State is lost and might corrupt the disk image.
func (m *Machine) Poweroff() error {
switch m.State {
case driver.Poweroff, driver.Aborted, driver.Saved:
return nil
}
return vbm("controlvm", m.Name, "poweroff")
}
// Restart gracefully restarts the machine.
func (m *Machine) Restart() error {
switch m.State {
case driver.Paused, driver.Saved:
if err := m.Start(); err != nil {
return err
}
}
if err := m.Stop(); err != nil {
return err
}
return m.Start()
}
// Reset forcefully restarts the machine. State is lost and might corrupt the disk image.
func (m *Machine) Reset() error {
switch m.State {
case driver.Paused, driver.Saved:
if err := m.Start(); err != nil {
return err
}
}
return vbm("controlvm", m.Name, "reset")
}
// Delete deletes the machine and associated disk images.
func (m *Machine) Delete() error {
if err := m.Poweroff(); err != nil {
return err
}
return vbm("unregistervm", m.Name, "--delete")
}
// Get current state
func (m *Machine) GetName() string {
return m.Name
}
// Get current state
func (m *Machine) GetState() driver.MachineState {
return m.State
}
// Get serial file
func (m *Machine) GetSerialFile() string {
return m.SerialFile
}
// Get Docker port
func (m *Machine) GetDockerPort() uint {
return m.DockerPort
}
// Get SSH port
func (m *Machine) GetSSHPort() uint {
return m.SSHPort
}
// GetMachine finds a machine by its name or UUID and populates a
// Machine from the `showvminfo --machinereadable` key=value output.
// It returns ErrMachineNotExist when VBoxManage reports an unknown
// machine, and passes through any other command or parse error.
func GetMachine(id string) (*Machine, error) {
	stdout, stderr, err := vbmOutErr("showvminfo", id, "--machinereadable")
	if err != nil {
		if reMachineNotFound.FindString(stderr) != "" {
			return nil, ErrMachineNotExist
		}
		return nil, err
	}
	s := bufio.NewScanner(strings.NewReader(stdout))
	m := &Machine{}
	for s.Scan() {
		res := reVMInfoLine.FindStringSubmatch(s.Text())
		if res == nil {
			continue
		}
		// The key is in capture group 1 or 2 and the value in group 3
		// or 4 — presumably the quoted vs. unquoted alternatives of
		// reVMInfoLine; confirm against its definition.
		key := res[1]
		if key == "" {
			key = res[2]
		}
		val := res[3]
		if val == "" {
			val = res[4]
		}
		switch key {
		case "name":
			m.Name = val
		case "UUID":
			m.UUID = val
		case "VMState":
			m.State = driver.MachineState(val)
		case "memory":
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return nil, err
			}
			m.Memory = uint(n)
		case "cpus":
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return nil, err
			}
			m.CPUs = uint(n)
		case "vram":
			n, err := strconv.ParseUint(val, 10, 32)
			if err != nil {
				return nil, err
			}
			m.VRAM = uint(n)
		case "CfgFile":
			m.CfgFile = val
			m.BaseFolder = filepath.Dir(val)
		case "uartmode1":
			// uartmode1="server,/home/sven/.boot2docker/boot2docker-vm.sock"
			vals := strings.Split(val, ",")
			if len(vals) >= 2 {
				m.SerialFile = vals[1]
			}
		default:
			if strings.HasPrefix(key, "Forwarding(") {
				// "Forwarding(\d*)" are ordered by the name inside the val, not fixed order.
				// Forwarding(0)="docker,tcp,127.0.0.1,5555,,"
				// Forwarding(1)="ssh,tcp,127.0.0.1,2222,,"
				vals := strings.Split(val, ",")
				// FIX: the original indexed vals[3] unconditionally and
				// panicked on a rule with fewer than four fields; skip
				// malformed rules instead.
				if len(vals) < 4 {
					continue
				}
				n, err := strconv.ParseUint(vals[3], 10, 32)
				if err != nil {
					return nil, err
				}
				switch vals[0] {
				case "docker":
					m.DockerPort = uint(n)
				case "ssh":
					m.SSHPort = uint(n)
				}
			}
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return m, nil
}
// ListMachines lists all registered machines.
func ListMachines() ([]string, error) {
out, err := vbmOut("list", "vms")
if err != nil {
return nil, err
}
ms := []string{}
s := bufio.NewScanner(strings.NewReader(out))
for s.Scan() {
res := reVMNameUUID.FindStringSubmatch(s.Text())
if res == nil {
continue
}
ms = append(ms, res[1])
}
if err := s.Err(); err != nil {
return nil, err
}
return ms, nil
}
// CreateMachine creates a new machine. If basefolder is empty, use default.
//
// The VM is registered, configured for Boot2Docker (64-bit Linux OS
// type, capped CPU count, feature flags, NAT NIC with ssh/docker port
// forwards, host-only NIC, SATA controller, boot ISO), and given a
// persistent VMDK disk. Without a configured base VMDK, a fresh disk
// image is created whose leading bytes hold a tar stream containing a
// magic marker string and the user's SSH public key; the marker tells
// the boot2docker automount script to format the disk on first boot.
func CreateMachine(mc *driver.MachineConfig) (*Machine, error) {
	if mc.VM == "" {
		return nil, fmt.Errorf("machine name is empty")
	}
	// Check if a machine with the given name already exists.
	machineNames, err := ListMachines()
	if err != nil {
		return nil, err
	}
	for _, m := range machineNames {
		if m == mc.VM {
			return nil, ErrMachineExist
		}
	}
	// Create and register the machine.
	args := []string{"createvm", "--name", mc.VM, "--register"}
	if err := vbm(args...); err != nil {
		return nil, err
	}
	m, err := GetMachine(mc.VM)
	if err != nil {
		return nil, err
	}
	// Configure VM for Boot2docker
	// NOTE(review): the SetExtra error is silently discarded — confirm
	// that is intentional.
	SetExtra(mc.VM, "VBoxInternal/CPUM/EnableHVP", "1")
	m.OSType = "Linux26_64"
	m.CPUs = uint(runtime.NumCPU())
	if m.CPUs > 32 {
		m.CPUs = 32
	}
	m.Memory = mc.Memory
	m.SerialFile = mc.SerialFile
	m.Flag |= F_pae
	m.Flag |= F_longmode // important: use x86-64 processor
	m.Flag |= F_rtcuseutc
	m.Flag |= F_acpi
	m.Flag |= F_ioapic
	m.Flag |= F_hpet
	m.Flag |= F_hwvirtex
	m.Flag |= F_vtxvpid
	m.Flag |= F_largepages
	m.Flag |= F_nestedpaging
	// Set VM boot order
	m.BootOrder = []string{"dvd"}
	if err := m.Modify(); err != nil {
		return m, err
	}
	// Set NIC #1 to use NAT
	// NOTE(review): this SetNIC error is dropped, unlike the NIC #2
	// call below — confirm or handle it.
	m.SetNIC(1, driver.NIC{Network: driver.NICNetNAT, Hardware: driver.VirtIO})
	pfRules := map[string]driver.PFRule{
		"ssh": {Proto: driver.PFTCP, HostIP: net.ParseIP("127.0.0.1"), HostPort: mc.SSHPort, GuestPort: driver.SSHPort},
	}
	if mc.DockerPort > 0 {
		pfRules["docker"] = driver.PFRule{Proto: driver.PFTCP, HostIP: net.ParseIP("127.0.0.1"), HostPort: mc.DockerPort, GuestPort: driver.DockerPort}
	}
	for name, rule := range pfRules {
		if err := m.AddNATPF(1, name, rule); err != nil {
			return m, err
		}
	}
	hostIFName, err := getHostOnlyNetworkInterface(mc)
	if err != nil {
		return m, err
	}
	// Set NIC #2 to use host-only
	if err := m.SetNIC(2, driver.NIC{Network: driver.NICNetHostonly, Hardware: driver.VirtIO, HostonlyAdapter: hostIFName}); err != nil {
		return m, err
	}
	// Set VM storage
	if err := m.AddStorageCtl("SATA", driver.StorageController{SysBus: driver.SysBusSATA, HostIOCache: true, Bootable: true}); err != nil {
		return m, err
	}
	// Attach ISO image
	if err := m.AttachStorage("SATA", driver.StorageMedium{Port: 0, Device: 0, DriveType: driver.DriveDVD, Medium: mc.ISO}); err != nil {
		return m, err
	}
	diskImg := filepath.Join(m.BaseFolder, fmt.Sprintf("%s.vmdk", mc.VM))
	if _, err := os.Stat(diskImg); err != nil {
		if !os.IsNotExist(err) {
			return m, err
		}
		if cfg.VMDK != "" {
			// Clone the configured base VMDK as the persistent disk.
			if err := copyDiskImage(diskImg, cfg.VMDK); err != nil {
				return m, err
			}
		} else {
			magicString := "boot2docker, please format-me"
			buf := new(bytes.Buffer)
			tw := tar.NewWriter(buf)
			// magicString first so the automount script knows to format the disk
			file := &tar.Header{Name: magicString, Size: int64(len(magicString))}
			if err := tw.WriteHeader(file); err != nil {
				return m, err
			}
			if _, err := tw.Write([]byte(magicString)); err != nil {
				return m, err
			}
			// .ssh/key.pub => authorized_keys
			file = &tar.Header{Name: ".ssh", Typeflag: tar.TypeDir, Mode: 0700}
			if err := tw.WriteHeader(file); err != nil {
				return m, err
			}
			pubKey, err := ioutil.ReadFile(mc.SSHKey + ".pub")
			if err != nil {
				return m, err
			}
			file = &tar.Header{Name: ".ssh/authorized_keys", Size: int64(len(pubKey)), Mode: 0644}
			if err := tw.WriteHeader(file); err != nil {
				return m, err
			}
			if _, err := tw.Write([]byte(pubKey)); err != nil {
				return m, err
			}
			file = &tar.Header{Name: ".ssh/authorized_keys2", Size: int64(len(pubKey)), Mode: 0644}
			if err := tw.WriteHeader(file); err != nil {
				return m, err
			}
			if _, err := tw.Write([]byte(pubKey)); err != nil {
				return m, err
			}
			if err := tw.Close(); err != nil {
				return m, err
			}
			if err := makeDiskImage(diskImg, mc.DiskSize, buf.Bytes()); err != nil {
				return m, err
			}
			if verbose {
				fmt.Println("Initializing disk with ssh keys")
				fmt.Printf("WRITING: %s\n-----\n", buf)
			}
		}
	}
	if err := m.AttachStorage("SATA", driver.StorageMedium{Port: 1, Device: 0, DriveType: driver.DriveHDD, Medium: diskImg}); err != nil {
		return m, err
	}
	return m, nil
}
// setUpShares configures guest-addition automount properties and
// registers the configured shared folders (with symlink support) on
// the VM. It is invoked from Start so that shares are (re-)created on
// every boot, not only at machine creation time.
func (m *Machine) setUpShares() error {
	// let VBoxService do nice magic automounting (when it's used)
	if err := vbm("guestproperty", "set", m.Name, "/VirtualBox/GuestAdd/SharedFolders/MountPrefix", "/"); err != nil {
		return err
	}
	if err := vbm("guestproperty", "set", m.Name, "/VirtualBox/GuestAdd/SharedFolders/MountDir", "/"); err != nil {
		return err
	}
	// set up some shared folders as appropriate
	if len(cfg.shares) == 0 {
		cfg.shares.Set(shareDefault) // dropped error is safe today: Set always returns nil
	}
	for shareName, shareDir := range cfg.shares {
		if shareDir == "disable" {
			continue
		}
		if _, err := os.Stat(shareDir); err != nil {
			return err
		}
		// woo, shareDir exists! let's carry on!
		if err := vbm("sharedfolder", "add", m.Name, "--name", shareName, "--hostpath", shareDir, "--automount"); err != nil {
			return err
		}
		// enable symlinks
		if err := vbm("setextradata", m.Name, "VBoxInternal2/SharedFoldersEnableSymlinksCreate/"+shareName, "1"); err != nil {
			return err
		}
	}
	return nil
}
// Modify applies the receiver's current field values (OS type, CPU,
// memory, VRAM, feature flags, serial port, boot order) to the VM via
// `modifyvm`, then refreshes the struct from VirtualBox so it reflects
// what was actually applied.
func (m *Machine) Modify() error {
	args := []string{"modifyvm", m.Name,
		"--firmware", "bios",
		"--bioslogofadein", "off",
		"--bioslogofadeout", "off",
		"--natdnshostresolver1", "on",
		"--bioslogodisplaytime", "0",
		"--biosbootmenu", "disabled",
		"--ostype", m.OSType,
		"--cpus", fmt.Sprintf("%d", m.CPUs),
		"--memory", fmt.Sprintf("%d", m.Memory),
		"--vram", fmt.Sprintf("%d", m.VRAM),
		"--acpi", m.Flag.Get(F_acpi),
		"--ioapic", m.Flag.Get(F_ioapic),
		"--rtcuseutc", m.Flag.Get(F_rtcuseutc),
		"--cpuhotplug", m.Flag.Get(F_cpuhotplug),
		"--pae", m.Flag.Get(F_pae),
		"--longmode", m.Flag.Get(F_longmode),
		"--synthcpu", m.Flag.Get(F_synthcpu),
		"--hpet", m.Flag.Get(F_hpet),
		"--hwvirtex", m.Flag.Get(F_hwvirtex),
		"--triplefaultreset", m.Flag.Get(F_triplefaultreset),
		"--nestedpaging", m.Flag.Get(F_nestedpaging),
		"--largepages", m.Flag.Get(F_largepages),
		"--vtxvpid", m.Flag.Get(F_vtxvpid),
		"--vtxux", m.Flag.Get(F_vtxux),
		"--accelerate3d", m.Flag.Get(F_accelerate3d),
	}
	// Serial port setup is currently unconditional (the windows guard
	// below was commented out).
	//if runtime.GOOS != "windows" {
	args = append(args,
		"--uart1", "0x3F8", "4",
		"--uartmode1", "server", m.SerialFile,
	)
	//}
	for i, dev := range m.BootOrder {
		if i > 3 {
			break // Only four slots `--boot{1,2,3,4}`. Ignore the rest.
		}
		args = append(args, fmt.Sprintf("--boot%d", i+1), dev)
	}
	if err := vbm(args...); err != nil {
		return err
	}
	return m.Refresh()
}
// AddNATPF adds a NAT port forwarding rule to the n-th NIC with the
// given name, formatted per `controlvm natpf<n>` ("name,rule").
func (m *Machine) AddNATPF(n int, name string, rule driver.PFRule) error {
	return vbm("controlvm", m.Name, fmt.Sprintf("natpf%d", n),
		fmt.Sprintf("%s,%s", name, rule.Format()))
}
// DelNATPF deletes the NAT port forwarding rule with the given name from the n-th NIC.
func (m *Machine) DelNATPF(n int, name string) error {
return vbm("controlvm", m.Name, fmt.Sprintf("natpf%d", n), "delete", name)
}
// SetNIC configures the n-th network interface: network type, emulated
// hardware, and — for host-only networks — the host adapter to bind.
func (m *Machine) SetNIC(n int, nic driver.NIC) error {
	args := []string{"modifyvm", m.Name,
		fmt.Sprintf("--nic%d", n), string(nic.Network),
		fmt.Sprintf("--nictype%d", n), string(nic.Hardware),
		fmt.Sprintf("--cableconnected%d", n), "on",
	}
	// Compare against the driver constant rather than the bare string
	// "hostonly", matching how callers (CreateMachine) specify the
	// network and preventing drift from the constant's value.
	if nic.Network == driver.NICNetHostonly {
		args = append(args, fmt.Sprintf("--hostonlyadapter%d", n), nic.HostonlyAdapter)
	}
	return vbm(args...)
}
// AddStorageCtl adds a storage controller with the given name.
func (m *Machine) AddStorageCtl(name string, ctl driver.StorageController) error {
args := []string{"storagectl", m.Name, "--name", name}
if ctl.SysBus != "" {
args = append(args, "--add", string(ctl.SysBus))
}
if ctl.Ports > 0 {
args = append(args, "--portcount", fmt.Sprintf("%d", ctl.Ports))
}
if ctl.Chipset != "" {
args = append(args, "--controller", string(ctl.Chipset))
}
args = append(args, "--hostiocache", bool2string(ctl.HostIOCache))
args = append(args, "--bootable", bool2string(ctl.Bootable))
return vbm(args...)
}
// DelStorageCtl deletes the storage controller with the given name.
func (m *Machine) DelStorageCtl(name string) error {
return vbm("storagectl", m.Name, "--name", name, "--remove")
}
// AttachStorage attaches a storage medium to the named storage controller.
func (m *Machine) AttachStorage(ctlName string, medium driver.StorageMedium) error {
return vbm("storageattach", m.Name, "--storagectl", ctlName,
"--port", fmt.Sprintf("%d", medium.Port),
"--device", fmt.Sprintf("%d", medium.Device),
"--type", string(medium.DriveType),
"--medium", medium.Medium,
)
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/mlbright/forecast/v2"
)
var conditionIcons = map[string]string{
"clear-day": "☀️",
"clear-night": "🌙",
"cloudy": "☁️",
"fog": "🌁",
"partly-cloudy-day": "⛅️",
"partly-cloudy-night": "🌙",
"rain": "☔️",
"sleet": "❄️ ☔️",
"snow": "❄️",
"wind": "🍃",
"error": "❗️",
}
var maxCacheAge, _ = time.ParseDuration("1h")
// main prints an emoji for the current weather at the configured
// coordinates, fetching from the forecast.io API or reusing a cached
// response younger than maxCacheAge.
func main() {
	coordinates := flag.String("coordinates", "39.95,-75.1667", "the coordinates, expressed as latitude,longitude")
	tmpDir := flag.String("tmpdir", os.TempDir(), "the directory to use to store cached responses")
	key := flag.String("key", os.Getenv("FORECAST_IO_API_KEY"), "your forecast.io API key")
	// FIX: Parse must run before any flag value is read. The original
	// dereferenced *key before flag.Parse(), so -key was never honored
	// (only the env-var default was ever seen).
	flag.Parse()
	if *key == "" {
		exitWith("Please provide your forecast.io API key with -key, or set FORECAST_IO_API_KEY", 1)
	}
	coordinateParts := strings.Split(*coordinates, ",")
	if len(coordinateParts) != 2 {
		exitWith("You must specify latitude and longitude like so: 39.95,-75.1667", 1)
	}
	latitude := coordinateParts[0]
	longitude := coordinateParts[1]
	cacheFilename := fmt.Sprintf("emoji-weather-%s-%s.json", latitude, longitude)
	cacheFile := path.Join(*tmpDir, cacheFilename)
	var json []byte
	var err error
	if isCacheStale(cacheFile) {
		json, err = getForecast(*key, latitude, longitude)
		// FIX: the original silently ignored a fetch error and went on
		// to format nil JSON; abort with the error instead.
		check(err)
		err = writeCache(cacheFile, json)
		check(err)
	} else {
		json, err = ioutil.ReadFile(cacheFile)
		check(err)
	}
	fmt.Println(formatConditions(extractConditionFromJSON(json)))
}
// isCacheStale reports whether the cached response at cacheFile is
// missing, unreadable, or older than maxCacheAge.
func isCacheStale(cacheFile string) bool {
	stat, err := os.Stat(cacheFile)
	// FIX: the original tested os.IsNotExist(err) only, so any other
	// stat error (e.g. permissions) fell through and dereferenced the
	// nil FileInfo — a panic. Treat every stat failure as stale.
	return err != nil || time.Since(stat.ModTime()) > maxCacheAge
}
// getForecast fetches the current conditions for the given coordinates
// from the forecast.io API and returns the raw JSON response body.
func getForecast(key string, latitude string, longitude string) (json []byte, err error) {
	res, err := forecast.GetResponse(key, latitude, longitude, "now", "us")
	if err != nil {
		return nil, err
	}
	// FIX: the response body was never closed, leaking the underlying
	// connection on every fetch.
	defer res.Body.Close()
	json, err = ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return json, nil
}
func writeCache(cacheFile string, json []byte) (err error) {
return ioutil.WriteFile(cacheFile, json, 0644)
}
// formatConditions maps a forecast.io condition name to its emoji
// icon; unknown condition names are returned unchanged.
func formatConditions(condition string) string {
	if icon, known := conditionIcons[condition]; known {
		return icon
	}
	return condition
}
// extractConditionFromJSON parses a forecast.io response and returns
// the current condition icon name. A parse failure yields the error
// emoji directly; a positive API error code yields the "error" key.
func extractConditionFromJSON(jsonBlob []byte) string {
	f, err := forecast.FromJSON(jsonBlob)
	if err != nil {
		return "❗️"
	}
	if f.Code > 0 {
		return "error"
	}
	// Idiom fix: the original used `else` after a terminating return.
	return f.Currently.Icon
}
// exitWith prints message on stdout and terminates the process with
// the given exit status.
func exitWith(message interface{}, status int) {
	fmt.Println(message)
	os.Exit(status)
}
// check terminates the process with status 1 (printing the error)
// when err is non-nil; otherwise it does nothing.
func check(err error) {
	if err != nil {
		exitWith(err, 1)
	}
}
Use flag values instead of pointers
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/mlbright/forecast/v2"
)
var conditionIcons = map[string]string{
"clear-day": "☀️",
"clear-night": "🌙",
"cloudy": "☁️",
"fog": "🌁",
"partly-cloudy-day": "⛅️",
"partly-cloudy-night": "🌙",
"rain": "☔️",
"sleet": "❄️ ☔️",
"snow": "❄️",
"wind": "🍃",
"error": "❗️",
}
var maxCacheAge, _ = time.ParseDuration("1h")
var coordinates string
var key string
var tmpDir string
// init registers the command-line flags and parses them before main
// runs.
//
// NOTE(review): calling flag.Parse from init is fragile — it runs
// before other packages (e.g. testing) can register their flags.
// main reads the parsed values directly, so moving Parse would have
// to touch main as well; flagging rather than changing here.
func init() {
	flag.StringVar(&coordinates, "coordinates", "39.95,-75.1667", "the coordinates, expressed as latitude,longitude")
	flag.StringVar(&key, "key", os.Getenv("FORECAST_IO_API_KEY"), "your forecast.io API key")
	flag.StringVar(&tmpDir, "tmpdir", os.TempDir(), "the directory to use to store cached responses")
	flag.Parse()
}
func main() {
if key == "" {
exitWith("Please provide your forecast.io API key with -key, or set FORECAST_IO_API_KEY", 1)
}
coordinateParts := strings.Split(coordinates, ",")
if len(coordinateParts) != 2 {
exitWith("You must specify latitude and longitude like so: 39.95,-75.1667", 1)
}
latitude := coordinateParts[0]
longitude := coordinateParts[1]
cacheFilename := fmt.Sprintf("emoji-weather-%s-%s.json", latitude, longitude)
cacheFile := path.Join(tmpDir, cacheFilename)
var json []byte
var err error
if isCacheStale(cacheFile) {
json, err = getForecast(key, latitude, longitude)
check(err)
err = writeCache(cacheFile, json)
check(err)
} else {
json, err = ioutil.ReadFile(cacheFile)
check(err)
}
fmt.Println(formatConditions(extractConditionFromJSON(json)))
}
// isCacheStale reports whether the cached forecast at cacheFile is
// missing, unreadable, or older than maxCacheAge.
func isCacheStale(cacheFile string) bool {
	stat, err := os.Stat(cacheFile)
	if err != nil {
		// Treat any stat failure as stale. The original only checked
		// os.IsNotExist and then dereferenced a nil FileInfo (panic) on
		// other errors such as permission failures.
		return true
	}
	return time.Since(stat.ModTime()) > maxCacheAge
}
// getForecast fetches the current conditions for the given coordinates
// from forecast.io and returns the raw JSON response body.
func getForecast(key string, latitude string, longitude string) (json []byte, err error) {
	res, err := forecast.GetResponse(key, latitude, longitude, "now", "us")
	if err != nil {
		return nil, err
	}
	// Close the response body so the underlying connection can be reused;
	// the original leaked it on every call.
	defer res.Body.Close()
	json, err = ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return json, nil
}
func writeCache(cacheFile string, json []byte) (err error) {
return ioutil.WriteFile(cacheFile, json, 0644)
}
// formatConditions converts a condition name to its emoji, falling back
// to the raw condition string when no icon is known.
func formatConditions(condition string) string {
	if icon, ok := conditionIcons[condition]; ok {
		return icon
	}
	return condition
}
// extractConditionFromJSON pulls the current condition name out of a
// forecast.io JSON payload, returning "❗️" for unparseable input and
// "error" when the API reports a non-zero error code.
func extractConditionFromJSON(jsonBlob []byte) (condition string) {
	f, err := forecast.FromJSON(jsonBlob)
	if err != nil {
		return "❗️"
	}
	if f.Code > 0 {
		return "error"
	} else {
		return f.Currently.Icon
	}
}

// exitWith prints message and terminates the process with the given status.
func exitWith(message interface{}, status int) {
	fmt.Printf("%v\n", message)
	os.Exit(status)
}

// check exits the program with status 1 when err is non-nil.
func check(err error) {
	if err != nil {
		exitWith(err, 1)
	}
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asm
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"testing"
"cmd/asm/internal/lex"
"cmd/internal/obj"
)
// An end-to-end test for the assembler: Do we print what we parse?
// Output is generated by, in effect, turning on -S and comparing the
// result against a golden file testdata/<goarch>.out.
func testEndToEnd(t *testing.T, goarch string) {
	lex.InitHist()
	input := filepath.Join("testdata", goarch+".s")
	output := filepath.Join("testdata", goarch+".out")
	architecture, ctxt := setArch(goarch)
	lexer := lex.NewLexer(input, ctxt)
	parser := NewParser(ctxt, architecture, lexer)
	pList := obj.Linknewplist(ctxt)
	var ok bool
	testOut = new(bytes.Buffer) // The assembler writes -S output to this buffer.
	ctxt.Bso = obj.Binitw(os.Stdout)
	defer obj.Bflush(ctxt.Bso)
	ctxt.Diag = log.Fatalf
	obj.Binitw(ioutil.Discard)
	pList.Firstpc, ok = parser.Parse()
	if !ok {
		// Report the architecture under test; the original hard-coded "ppc64"
		// even when running arm/amd64/386.
		t.Fatalf("asm: %s assembly failed", goarch)
	}
	result := string(testOut.Bytes())
	expect, err := ioutil.ReadFile(output)
	// For Windows.
	result = strings.Replace(result, `testdata\`, `testdata/`, -1)
	if err != nil {
		t.Fatal(err)
	}
	if result != string(expect) {
		if false { // Enable to capture output.
			fmt.Printf("%s", result)
			os.Exit(1)
		}
		t.Errorf("%s failed: output differs", goarch)
		r := strings.Split(result, "\n")
		e := strings.Split(string(expect), "\n")
		if len(r) != len(e) {
			// Supply goarch for the %s verb; the original passed two args
			// for three verbs, producing a %!s(int=...) artifact.
			t.Errorf("%s: expected %d lines, got %d", goarch, len(e), len(r))
		}
		n := len(e)
		if n > len(r) {
			n = len(r)
		}
		for i := 0; i < n; i++ {
			if r[i] != e[i] {
				t.Errorf("%s:%d:\nexpected\n\t%s\ngot\n\t%s", output, i, e[i], r[i])
			}
		}
	}
}
// TestPPC64EndToEnd checks the assembler end-to-end on the ppc64 testdata.
func TestPPC64EndToEnd(t *testing.T) {
	testEndToEnd(t, "ppc64")
}

// TestARMEndToEnd checks the assembler end-to-end on the arm testdata.
func TestARMEndToEnd(t *testing.T) {
	testEndToEnd(t, "arm")
}

// TestAMD64EndToEnd checks the assembler end-to-end on the amd64 testdata.
func TestAMD64EndToEnd(t *testing.T) {
	testEndToEnd(t, "amd64")
}

// Test386EndToEnd checks the assembler end-to-end on the 386 testdata.
func Test386EndToEnd(t *testing.T) {
	testEndToEnd(t, "386")
}
cmd/asm/internal/asm: report arch if assembly fails
Just a trivial thing I noticed in passing.
Change-Id: I875069ceffd623f9e430d07feb5042ab9e69917e
Reviewed-on: https://go-review.googlesource.com/7472
Reviewed-by: Rob Pike <4dc7c9ec434ed06502767136789763ec11d2c4b7@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asm
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"testing"
"cmd/asm/internal/lex"
"cmd/internal/obj"
)
// An end-to-end test for the assembler: Do we print what we parse?
// Output is generated by, in effect, turning on -S and comparing the
// result against a golden file testdata/<goarch>.out.
func testEndToEnd(t *testing.T, goarch string) {
	lex.InitHist()
	input := filepath.Join("testdata", goarch+".s")
	output := filepath.Join("testdata", goarch+".out")
	architecture, ctxt := setArch(goarch)
	lexer := lex.NewLexer(input, ctxt)
	parser := NewParser(ctxt, architecture, lexer)
	pList := obj.Linknewplist(ctxt)
	var ok bool
	testOut = new(bytes.Buffer) // The assembler writes -S output to this buffer.
	ctxt.Bso = obj.Binitw(os.Stdout)
	defer obj.Bflush(ctxt.Bso)
	ctxt.Diag = log.Fatalf
	obj.Binitw(ioutil.Discard)
	pList.Firstpc, ok = parser.Parse()
	if !ok {
		t.Fatalf("asm: %s assembly failed", goarch)
	}
	result := string(testOut.Bytes())
	expect, err := ioutil.ReadFile(output)
	// For Windows.
	result = strings.Replace(result, `testdata\`, `testdata/`, -1)
	if err != nil {
		t.Fatal(err)
	}
	if result != string(expect) {
		if false { // Enable to capture output.
			fmt.Printf("%s", result)
			os.Exit(1)
		}
		t.Errorf("%s failed: output differs", goarch)
		r := strings.Split(result, "\n")
		e := strings.Split(string(expect), "\n")
		if len(r) != len(e) {
			// Supply goarch for the %s verb; the original passed two args
			// for three verbs, producing a %!s(int=...) artifact.
			t.Errorf("%s: expected %d lines, got %d", goarch, len(e), len(r))
		}
		n := len(e)
		if n > len(r) {
			n = len(r)
		}
		for i := 0; i < n; i++ {
			if r[i] != e[i] {
				t.Errorf("%s:%d:\nexpected\n\t%s\ngot\n\t%s", output, i, e[i], r[i])
			}
		}
	}
}
// TestPPC64EndToEnd checks the assembler end-to-end on the ppc64 testdata.
func TestPPC64EndToEnd(t *testing.T) {
	testEndToEnd(t, "ppc64")
}

// TestARMEndToEnd checks the assembler end-to-end on the arm testdata.
func TestARMEndToEnd(t *testing.T) {
	testEndToEnd(t, "arm")
}

// TestAMD64EndToEnd checks the assembler end-to-end on the amd64 testdata.
func TestAMD64EndToEnd(t *testing.T) {
	testEndToEnd(t, "amd64")
}

// Test386EndToEnd checks the assembler end-to-end on the 386 testdata.
func Test386EndToEnd(t *testing.T) {
	testEndToEnd(t, "386")
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
// flagalloc allocates the flag register among all the flag-generating
// instructions. Flag values are recomputed if they need to be
// spilled/restored.
func flagalloc(f *Func) {
	// Compute the in-register flag value we want at the end of
	// each block. This is basically a best-effort live variable
	// analysis, so it can be much simpler than a full analysis.
	end := make([]*Value, f.NumBlocks())
	po := f.postorder()
	for n := 0; n < 2; n++ {
		for _, b := range po {
			// Walk values backwards to figure out what flag
			// value we want in the flag register at the start
			// of the block.
			var flag *Value
			for _, c := range b.ControlValues() {
				if c.Type.IsFlags() {
					if flag != nil {
						panic("cannot have multiple controls using flags")
					}
					flag = c
				}
			}
			if flag == nil {
				flag = end[b.ID]
			}
			for j := len(b.Values) - 1; j >= 0; j-- {
				v := b.Values[j]
				if v == flag {
					flag = nil
				}
				if v.clobbersFlags() {
					flag = nil
				}
				for _, a := range v.Args {
					if a.Type.IsFlags() {
						flag = a
					}
				}
			}
			if flag != nil {
				for _, e := range b.Preds {
					p := e.b
					end[p.ID] = flag
				}
			}
		}
	}
	// For blocks which have a flags control value, that's the only value
	// we can leave in the flags register at the end of the block. (There
	// is no place to put a flag regeneration instruction.)
	for _, b := range f.Blocks {
		if b.Kind == BlockDefer {
			// Defer blocks internally use/clobber the flags value.
			end[b.ID] = nil
			continue
		}
		for _, v := range b.ControlValues() {
			if v.Type.IsFlags() && end[b.ID] != v {
				end[b.ID] = nil
			}
		}
	}
	// Compute which flags values will need to be spilled.
	spill := map[ID]bool{}
	for _, b := range f.Blocks {
		var flag *Value
		if len(b.Preds) > 0 {
			flag = end[b.Preds[0].b.ID]
		}
		for _, v := range b.Values {
			for _, a := range v.Args {
				if !a.Type.IsFlags() {
					continue
				}
				if a == flag {
					continue
				}
				// a will need to be restored here.
				spill[a.ID] = true
				flag = a
			}
			if v.clobbersFlags() {
				flag = nil
			}
			if v.Type.IsFlags() {
				flag = v
			}
		}
		for _, v := range b.ControlValues() {
			if v != flag && v.Type.IsFlags() {
				spill[v.ID] = true
			}
		}
		if v := end[b.ID]; v != nil && v != flag {
			spill[v.ID] = true
		}
	}
	// Add flag spill and recomputation where they are needed.
	var remove []*Value // values that should be checked for possible removal
	var oldSched []*Value
	for _, b := range f.Blocks {
		oldSched = append(oldSched[:0], b.Values...)
		b.Values = b.Values[:0]
		// The current live flag value (the pre-flagalloc copy).
		var flag *Value
		if len(b.Preds) > 0 {
			flag = end[b.Preds[0].b.ID]
			// Note: the following condition depends on the lack of critical edges.
			for _, e := range b.Preds[1:] {
				p := e.b
				if end[p.ID] != flag {
					f.Fatalf("live flag in %s's predecessors not consistent", b)
				}
			}
		}
		for _, v := range oldSched {
			if v.Op == OpPhi && v.Type.IsFlags() {
				f.Fatalf("phi of flags not supported: %s", v.LongString())
			}
			// If v will be spilled, and v uses memory, then we must split it
			// into a load + a flag generator.
			if spill[v.ID] && v.MemoryArg() != nil {
				remove = append(remove, v)
				if !f.Config.splitLoad(v) {
					f.Fatalf("can't split flag generator: %s", v.LongString())
				}
			}
			// Make sure any flag arg of v is in the flags register.
			// If not, recompute it.
			for i, a := range v.Args {
				if !a.Type.IsFlags() {
					continue
				}
				if a == flag {
					continue
				}
				// Recalculate a
				c := copyFlags(a, b)
				// Update v.
				v.SetArg(i, c)
				// Remember the most-recently computed flag value.
				flag = a
			}
			// Issue v.
			b.Values = append(b.Values, v)
			if v.clobbersFlags() {
				flag = nil
			}
			if v.Type.IsFlags() {
				flag = v
			}
		}
		for i, v := range b.ControlValues() {
			if v != flag && v.Type.IsFlags() {
				// Recalculate control value.
				remove = append(remove, v)
				c := copyFlags(v, b)
				b.ReplaceControl(i, c)
				flag = v
			}
		}
		if v := end[b.ID]; v != nil && v != flag {
			// Need to reissue flag generator for use by
			// subsequent blocks.
			remove = append(remove, v)
			copyFlags(v, b)
			// Note: this flag generator is not properly linked up
			// with the flag users. This breaks the SSA representation.
			// We could fix up the users with another pass, but for now
			// we'll just leave it. (Regalloc has the same issue for
			// standard regs, and it runs next.)
			// For this reason, take care not to add this flag
			// generator to the remove list.
		}
	}
	// Save live flag state for later.
	for _, b := range f.Blocks {
		b.FlagsLiveAtEnd = end[b.ID] != nil
	}
	// The go115flagallocdeadcode guard was a constant-true kill switch;
	// the dead-code removal below is unconditional now.
	// Remove any now-dead values.
	// The number of values to remove is likely small,
	// and removing them requires processing all values in a block,
	// so minimize the number of blocks that we touch.
	// Shrink remove to contain only dead values, and clobber those dead values.
	for i := 0; i < len(remove); i++ {
		v := remove[i]
		if v.Uses == 0 {
			v.reset(OpInvalid)
			continue
		}
		// Remove v.
		last := len(remove) - 1
		remove[i] = remove[last]
		remove[last] = nil
		remove = remove[:last]
		i-- // reprocess value at i
	}
	if len(remove) == 0 {
		return
	}
	removeBlocks := f.newSparseSet(f.NumBlocks())
	defer f.retSparseSet(removeBlocks)
	for _, v := range remove {
		removeBlocks.add(v.Block.ID)
	}
	// Process affected blocks, preserving value order.
	for _, b := range f.Blocks {
		if !removeBlocks.contains(b.ID) {
			continue
		}
		i := 0
		for j := 0; j < len(b.Values); j++ {
			v := b.Values[j]
			if v.Op == OpInvalid {
				continue
			}
			b.Values[i] = v
			i++
		}
		b.truncateValues(i)
	}
}
// clobbersFlags reports whether v destroys the contents of the flags
// register as a side effect of executing.
func (v *Value) clobbersFlags() bool {
	if opcodeTable[v.Op].clobberFlags {
		return true
	}
	// A tuple-producing op with a flags component may have that component
	// go unused; with no Select to account for the flags half, the
	// generating instruction itself must be treated as clobbering flags.
	return v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags())
}
// copyFlags copies v (flag generator) into b, returns the copy.
// If v's arg is also flags, copy recursively, emitting argument copies
// before the copy of v itself so block value order stays schedulable.
func copyFlags(v *Value, b *Block) *Value {
	var argIdx []int      // positions of v's flag/tuple args
	var argCopy []*Value  // the recursively made copies, parallel to argIdx
	for i, a := range v.Args {
		if a.Type.IsFlags() || a.Type.IsTuple() {
			argIdx = append(argIdx, i)
			argCopy = append(argCopy, copyFlags(a, b))
		}
	}
	c := v.copyInto(b)
	for k, i := range argIdx {
		c.SetArg(i, argCopy[k])
	}
	return c
}
cmd/compile: remove go115flagallocdeadcode
Change-Id: Iafd72fb06a491075f7f996a6684e0d495c96aee5
Reviewed-on: https://go-review.googlesource.com/c/go/+/264342
Trust: Cherry Zhang <d62e63aa42ce272d7b6a5055d97e942b33a34679@google.com>
Reviewed-by: Keith Randall <8c99c3a9284e493be632950b84cd789d08ed3e9d@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
// flagalloc allocates the flag register among all the flag-generating
// instructions. Flag values are recomputed if they need to be
// spilled/restored.
func flagalloc(f *Func) {
	// Compute the in-register flag value we want at the end of
	// each block. This is basically a best-effort live variable
	// analysis, so it can be much simpler than a full analysis.
	end := make([]*Value, f.NumBlocks())
	po := f.postorder()
	// Two rounds of the backward walk let the (best-effort) liveness
	// information propagate further through the block graph.
	for n := 0; n < 2; n++ {
		for _, b := range po {
			// Walk values backwards to figure out what flag
			// value we want in the flag register at the start
			// of the block.
			var flag *Value
			for _, c := range b.ControlValues() {
				if c.Type.IsFlags() {
					if flag != nil {
						panic("cannot have multiple controls using flags")
					}
					flag = c
				}
			}
			if flag == nil {
				flag = end[b.ID]
			}
			for j := len(b.Values) - 1; j >= 0; j-- {
				v := b.Values[j]
				if v == flag {
					flag = nil
				}
				if v.clobbersFlags() {
					flag = nil
				}
				for _, a := range v.Args {
					if a.Type.IsFlags() {
						flag = a
					}
				}
			}
			// Whatever is live at block entry is what we want live at the
			// end of every predecessor.
			if flag != nil {
				for _, e := range b.Preds {
					p := e.b
					end[p.ID] = flag
				}
			}
		}
	}
	// For blocks which have a flags control value, that's the only value
	// we can leave in the flags register at the end of the block. (There
	// is no place to put a flag regeneration instruction.)
	for _, b := range f.Blocks {
		if b.Kind == BlockDefer {
			// Defer blocks internally use/clobber the flags value.
			end[b.ID] = nil
			continue
		}
		for _, v := range b.ControlValues() {
			if v.Type.IsFlags() && end[b.ID] != v {
				end[b.ID] = nil
			}
		}
	}
	// Compute which flags values will need to be spilled.
	spill := map[ID]bool{}
	for _, b := range f.Blocks {
		var flag *Value
		if len(b.Preds) > 0 {
			flag = end[b.Preds[0].b.ID]
		}
		for _, v := range b.Values {
			for _, a := range v.Args {
				if !a.Type.IsFlags() {
					continue
				}
				if a == flag {
					continue
				}
				// a will need to be restored here.
				spill[a.ID] = true
				flag = a
			}
			if v.clobbersFlags() {
				flag = nil
			}
			if v.Type.IsFlags() {
				flag = v
			}
		}
		for _, v := range b.ControlValues() {
			if v != flag && v.Type.IsFlags() {
				spill[v.ID] = true
			}
		}
		if v := end[b.ID]; v != nil && v != flag {
			spill[v.ID] = true
		}
	}
	// Add flag spill and recomputation where they are needed.
	var remove []*Value // values that should be checked for possible removal
	var oldSched []*Value
	for _, b := range f.Blocks {
		oldSched = append(oldSched[:0], b.Values...)
		b.Values = b.Values[:0]
		// The current live flag value (the pre-flagalloc copy).
		var flag *Value
		if len(b.Preds) > 0 {
			flag = end[b.Preds[0].b.ID]
			// Note: the following condition depends on the lack of critical edges.
			for _, e := range b.Preds[1:] {
				p := e.b
				if end[p.ID] != flag {
					f.Fatalf("live flag in %s's predecessors not consistent", b)
				}
			}
		}
		for _, v := range oldSched {
			if v.Op == OpPhi && v.Type.IsFlags() {
				f.Fatalf("phi of flags not supported: %s", v.LongString())
			}
			// If v will be spilled, and v uses memory, then we must split it
			// into a load + a flag generator.
			if spill[v.ID] && v.MemoryArg() != nil {
				remove = append(remove, v)
				if !f.Config.splitLoad(v) {
					f.Fatalf("can't split flag generator: %s", v.LongString())
				}
			}
			// Make sure any flag arg of v is in the flags register.
			// If not, recompute it.
			for i, a := range v.Args {
				if !a.Type.IsFlags() {
					continue
				}
				if a == flag {
					continue
				}
				// Recalculate a
				c := copyFlags(a, b)
				// Update v.
				v.SetArg(i, c)
				// Remember the most-recently computed flag value.
				flag = a
			}
			// Issue v.
			b.Values = append(b.Values, v)
			if v.clobbersFlags() {
				flag = nil
			}
			if v.Type.IsFlags() {
				flag = v
			}
		}
		for i, v := range b.ControlValues() {
			if v != flag && v.Type.IsFlags() {
				// Recalculate control value.
				remove = append(remove, v)
				c := copyFlags(v, b)
				b.ReplaceControl(i, c)
				flag = v
			}
		}
		if v := end[b.ID]; v != nil && v != flag {
			// Need to reissue flag generator for use by
			// subsequent blocks.
			remove = append(remove, v)
			copyFlags(v, b)
			// Note: this flag generator is not properly linked up
			// with the flag users. This breaks the SSA representation.
			// We could fix up the users with another pass, but for now
			// we'll just leave it. (Regalloc has the same issue for
			// standard regs, and it runs next.)
			// For this reason, take care not to add this flag
			// generator to the remove list.
		}
	}
	// Save live flag state for later.
	for _, b := range f.Blocks {
		b.FlagsLiveAtEnd = end[b.ID] != nil
	}
	// Remove any now-dead values.
	// The number of values to remove is likely small,
	// and removing them requires processing all values in a block,
	// so minimize the number of blocks that we touch.
	// Shrink remove to contain only dead values, and clobber those dead values.
	for i := 0; i < len(remove); i++ {
		v := remove[i]
		if v.Uses == 0 {
			v.reset(OpInvalid)
			continue
		}
		// Remove v.
		last := len(remove) - 1
		remove[i] = remove[last]
		remove[last] = nil
		remove = remove[:last]
		i-- // reprocess value at i
	}
	if len(remove) == 0 {
		return
	}
	removeBlocks := f.newSparseSet(f.NumBlocks())
	defer f.retSparseSet(removeBlocks)
	for _, v := range remove {
		removeBlocks.add(v.Block.ID)
	}
	// Process affected blocks, preserving value order.
	for _, b := range f.Blocks {
		if !removeBlocks.contains(b.ID) {
			continue
		}
		i := 0
		for j := 0; j < len(b.Values); j++ {
			v := b.Values[j]
			if v.Op == OpInvalid {
				continue
			}
			b.Values[i] = v
			i++
		}
		b.truncateValues(i)
	}
}
// clobbersFlags reports whether v destroys the contents of the flags
// register as a side effect of executing.
func (v *Value) clobbersFlags() bool {
	if opcodeTable[v.Op].clobberFlags {
		return true
	}
	if v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags()) {
		// This case handles the possibility where a flag value is generated but never used.
		// In that case, there's no corresponding Select to overwrite the flags value,
		// so we must consider flags clobbered by the tuple-generating instruction.
		return true
	}
	return false
}
// copyFlags copies v (flag generator) into b, returns the copy.
// If v's arg is also flags, copy recursively. Argument copies are
// emitted into b before the copy of v itself, preserving a usable
// value order within the block.
func copyFlags(v *Value, b *Block) *Value {
	flagsArgs := make(map[int]*Value)
	for i, a := range v.Args {
		if a.Type.IsFlags() || a.Type.IsTuple() {
			flagsArgs[i] = copyFlags(a, b)
		}
	}
	c := v.copyInto(b)
	for i, a := range flagsArgs {
		c.SetArg(i, a)
	}
	return c
}
|
package clockwork
import (
"sync/atomic"
"time"
)
// Timer provides an interface which can be used instead of directly
// using the timer within the time module. Where *time.Timer exposes the
// event channel as the field C, implementations here return it from
// Chan() so that the channel can be part of this interface.
type Timer interface {
	Chan() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
	// Deprecated: prefer Chan. C will be deleted in the future.
	C() <-chan time.Time
}
type realTimer struct {
t *time.Timer
}
func (r realTimer) Chan() <-chan time.Time {
return r.t.C
}
func (r realTimer) C() <-chan time.Time {
return r.t.C
}
func (r realTimer) Reset(d time.Duration) bool {
return r.t.Reset(d)
}
func (r realTimer) Stop() bool {
return r.t.Stop()
}
// fakeTimer is the FakeClock-backed Timer implementation. Event delivery
// is driven by the goroutine started in run (below).
type fakeTimer struct {
	c     chan time.Time
	clock FakeClock
	stop  chan struct{}
	reset chan reset
	// stopped is 1 while the timer is stopped/expired and 0 while armed;
	// accessed atomically.
	stopped uint32
}

// Chan returns the channel on which the fake timer's events are delivered.
func (f *fakeTimer) Chan() <-chan time.Time {
	return f.c
}

// C mirrors Chan; it exists for the deprecated interface method.
func (f *fakeTimer) C() <-chan time.Time {
	return f.c
}

// Reset re-arms the timer to fire d after the fake clock's current time,
// reporting whether the timer was active when Reset was called.
func (f *fakeTimer) Reset(d time.Duration) bool {
	stopped := f.Stop()
	f.reset <- reset{t: f.clock.Now().Add(d), next: f.clock.After(d)}
	if d > 0 {
		// Mark the timer armed again.
		// NOTE(review): d <= 0 leaves stopped at 1 — confirm intended.
		atomic.StoreUint32(&f.stopped, 0)
	}
	return stopped
}

// Stop disarms the timer, reporting whether it was active. Only the
// caller that wins the CAS signals the run goroutine.
func (f *fakeTimer) Stop() bool {
	if atomic.CompareAndSwapUint32(&f.stopped, 0, 1) {
		f.stop <- struct{}{}
		return true
	}
	return false
}

// reset carries a re-arm request to the run goroutine: the absolute fire
// time t and the clock channel that fires at that time.
type reset struct {
	t    time.Time
	next <-chan time.Time
}
// run initializes a background goroutine to send the timer event to the timer channel
// after the period. Events are discarded if the underlying ticker channel does not have
// enough capacity.
func (f *fakeTimer) run(initialDuration time.Duration) {
	nextTick := f.clock.Now().Add(initialDuration)
	next := f.clock.After(initialDuration)
	// waitForReset blocks until a Reset request arrives, swallowing any
	// Stop signals that arrive in the meantime.
	waitForReset := func() (time.Time, <-chan time.Time) {
		for {
			select {
			case <-f.stop:
				continue
			case r := <-f.reset:
				return r.t, r.next
			}
		}
	}
	go func() {
		for {
			select {
			case <-f.stop:
				// Stopped before firing: fall through and wait for a Reset.
			case <-next:
				atomic.StoreUint32(&f.stopped, 1)
				// Non-blocking send: drop the tick if nobody is receiving.
				select {
				case f.c <- nextTick:
				default:
				}
				next = nil
			}
			nextTick, next = waitForReset()
		}
	}()
}
Remove C(), again
package clockwork
import (
"sync/atomic"
"time"
)
// Timer provides an interface which can be used instead of directly
// using the timer within the time module. Where *time.Timer exposes the
// event channel as the field C, implementations here return it from
// Chan() so that the channel can be part of this interface.
type Timer interface {
	Chan() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}
// realTimer implements Timer by embedding a standard library *time.Timer;
// Reset and Stop are promoted directly from the embedded timer.
type realTimer struct {
	*time.Timer
}

// Chan returns the channel on which the timer's events are delivered.
func (r realTimer) Chan() <-chan time.Time {
	return r.C
}
// fakeTimer is the FakeClock-backed Timer implementation. Event delivery
// is driven by the goroutine started in run (below).
type fakeTimer struct {
	c     chan time.Time
	clock FakeClock
	stop  chan struct{}
	reset chan reset
	// stopped is 1 while the timer is stopped/expired and 0 while armed;
	// accessed atomically.
	stopped uint32
}

// Chan returns the channel on which the fake timer's events are delivered.
func (f *fakeTimer) Chan() <-chan time.Time {
	return f.c
}

// Reset re-arms the timer to fire d after the fake clock's current time,
// reporting whether the timer was active when Reset was called.
func (f *fakeTimer) Reset(d time.Duration) bool {
	stopped := f.Stop()
	f.reset <- reset{t: f.clock.Now().Add(d), next: f.clock.After(d)}
	if d > 0 {
		// Mark the timer armed again.
		// NOTE(review): d <= 0 leaves stopped at 1 — confirm intended.
		atomic.StoreUint32(&f.stopped, 0)
	}
	return stopped
}

// Stop disarms the timer, reporting whether it was active. Only the
// caller that wins the CAS signals the run goroutine.
func (f *fakeTimer) Stop() bool {
	if atomic.CompareAndSwapUint32(&f.stopped, 0, 1) {
		f.stop <- struct{}{}
		return true
	}
	return false
}

// reset carries a re-arm request to the run goroutine: the absolute fire
// time t and the clock channel that fires at that time.
type reset struct {
	t    time.Time
	next <-chan time.Time
}
// run initializes a background goroutine to send the timer event to the timer channel
// after the period. Events are discarded if the underlying ticker channel does not have
// enough capacity.
func (f *fakeTimer) run(initialDuration time.Duration) {
	nextTick := f.clock.Now().Add(initialDuration)
	next := f.clock.After(initialDuration)
	// waitForReset blocks until a Reset request arrives, swallowing any
	// Stop signals that arrive in the meantime.
	waitForReset := func() (time.Time, <-chan time.Time) {
		for {
			select {
			case <-f.stop:
				continue
			case r := <-f.reset:
				return r.t, r.next
			}
		}
	}
	go func() {
		for {
			select {
			case <-f.stop:
				// Stopped before firing: fall through and wait for a Reset.
			case <-next:
				atomic.StoreUint32(&f.stopped, 1)
				// Non-blocking send: drop the tick if nobody is receiving.
				select {
				case f.c <- nextTick:
				default:
				}
				next = nil
			}
			nextTick, next = waitForReset()
		}
	}()
}
|
package google
import (
"os"
"testing"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Shared fixtures for the acceptance tests; init wires the single
// "google" provider into both.
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider

func init() {
	testAccProvider = Provider()
	testAccProviders = map[string]terraform.ResourceProvider{
		"google": testAccProvider,
	}
}

// TestProvider verifies that the provider schema is internally consistent.
func TestProvider(t *testing.T) {
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}

// TestProvider_impl statically asserts that Provider satisfies
// terraform.ResourceProvider.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
// testAccPreCheck verifies the environment variables the acceptance
// tests require, failing early with a clear message instead of a cryptic
// provider error later on.
func testAccPreCheck(t *testing.T) {
	if v := os.Getenv("GOOGLE_ACCOUNT_FILE"); v == "" {
		t.Fatal("GOOGLE_ACCOUNT_FILE must be set for acceptance tests")
	}
	if v := os.Getenv("GOOGLE_CLIENT_FILE"); v == "" {
		t.Fatal("GOOGLE_CLIENT_FILE must be set for acceptance tests")
	}
	// Without a project the API calls fail with a cryptic error, so
	// require it up front as well.
	if v := os.Getenv("GOOGLE_PROJECT"); v == "" {
		t.Fatal("GOOGLE_PROJECT must be set for acceptance tests")
	}
}
Require project ID, as otherwise it will fail with a cryptic error
package google
import (
"os"
"testing"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Shared fixtures for the acceptance tests; init wires the single
// "google" provider into both.
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider

func init() {
	testAccProvider = Provider()
	testAccProviders = map[string]terraform.ResourceProvider{
		"google": testAccProvider,
	}
}

// TestProvider verifies that the provider schema is internally consistent.
func TestProvider(t *testing.T) {
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}

// TestProvider_impl statically asserts that Provider satisfies
// terraform.ResourceProvider.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
// testAccPreCheck verifies the environment variables the acceptance
// tests require, failing the run early with a clear message.
func testAccPreCheck(t *testing.T) {
	required := []string{
		"GOOGLE_ACCOUNT_FILE",
		"GOOGLE_CLIENT_FILE",
		"GOOGLE_PROJECT",
	}
	for _, name := range required {
		if os.Getenv(name) == "" {
			t.Fatalf("%s must be set for acceptance tests", name)
		}
	}
}
|
package winrm
import (
"bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"net/http"
"github.com/masterzen/winrm/soap"
)
// Client carries the connection settings and HTTP transport used to
// issue WinRM requests against a single remote endpoint.
type Client struct {
	Parameters
	username  string
	password  string
	useHTTPS  bool
	url       string
	http      HttpPost
	transport *http.Transport
}
// NewClient will create a new remote client on url, connecting with user and password
// This function doesn't connect (connection happens only when CreateShell is called)
func NewClient(endpoint *Endpoint, user, password string) (*Client, error) {
	return NewClientWithParameters(endpoint, user, password, DefaultParameters())
}
// NewClientWithParameters will create a new remote client on url, connecting with user and password
// This function doesn't connect (connection happens only when CreateShell is called)
func NewClientWithParameters(endpoint *Endpoint, user, password string, params *Parameters) (client *Client, err error) {
	transport, err := newTransport(endpoint)
	if err != nil {
		// Don't hand back a half-initialized client alongside the error;
		// the original ignored a failure to build the transport.
		return nil, err
	}
	client = &Client{
		Parameters: *params,
		username:   user,
		password:   password,
		url:        endpoint.url(),
		http:       Http_post,
		useHTTPS:   endpoint.HTTPS,
		transport:  transport,
	}
	return
}
// newTransport will create a new HTTP Transport, with options specified within the endpoint configuration
func newTransport(endpoint *Endpoint) (*http.Transport, error) {
	tlsConfig := &tls.Config{
		InsecureSkipVerify: endpoint.Insecure,
	}
	if endpoint.CACert != nil && len(*endpoint.CACert) > 0 {
		pool, err := readCACerts(endpoint.CACert)
		if err != nil {
			return nil, err
		}
		tlsConfig.RootCAs = pool
	}
	return &http.Transport{TLSClientConfig: tlsConfig}, nil
}
// readCACerts parses the PEM-encoded certificates in certs into a pool
// suitable for use as TLS root CAs.
func readCACerts(certs *[]byte) (*x509.CertPool, error) {
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(*certs) {
		// Error strings are lowercase by Go convention.
		return nil, fmt.Errorf("unable to read certificates")
	}
	return certPool, nil
}
// CreateShell will create a WinRM Shell, which is the prealable for running
// commands.
func (client *Client) CreateShell() (shell *Shell, err error) {
	request := NewOpenShellRequest(client.url, &client.Parameters)
	defer request.Free()
	response, err := client.sendRequest(request)
	if err != nil {
		return
	}
	shellId, err := ParseOpenShellResponse(response)
	if err != nil {
		return
	}
	return &Shell{client: client, ShellId: shellId}, nil
}
// sendRequest posts the SOAP message using the client's configured HTTP
// implementation and returns the raw response body.
func (client *Client) sendRequest(request *soap.SoapMessage) (response string, err error) {
	return client.http(client, request)
}
// Run runs command on the remote host, writing the process stdout and
// stderr to the given writers. Note with this method it isn't possible
// to inject stdin.
func (client *Client) Run(command string, stdout io.Writer, stderr io.Writer) (exitCode int, err error) {
	shell, err := client.CreateShell()
	if err != nil {
		return 0, err
	}
	var cmd *Command
	cmd, err = shell.Execute(command)
	if err != nil {
		// Close the shell on the error path too; the original leaked it.
		shell.Close()
		return 0, err
	}
	// NOTE(review): copy errors are discarded and the goroutines are not
	// joined before returning — confirm best-effort output is intended.
	go io.Copy(stdout, cmd.Stdout)
	go io.Copy(stderr, cmd.Stderr)
	cmd.Wait()
	shell.Close()
	return cmd.ExitCode(), cmd.err
}
// RunWithString runs command on the remote host, returning the process
// stdout and stderr as strings, and using the input stdin string as the
// process input.
func (client *Client) RunWithString(command string, stdin string) (stdout string, stderr string, exitCode int, err error) {
	shell, err := client.CreateShell()
	if err != nil {
		return "", "", 0, err
	}
	defer shell.Close()
	var cmd *Command
	cmd, err = shell.Execute(command)
	if err != nil {
		return "", "", 0, err
	}
	if len(stdin) > 0 {
		// NOTE(review): the Write error is ignored — confirm best-effort
		// stdin injection is intended.
		cmd.Stdin.Write([]byte(stdin))
	}
	var outWriter, errWriter bytes.Buffer
	go io.Copy(&outWriter, cmd.Stdout)
	go io.Copy(&errWriter, cmd.Stderr)
	cmd.Wait()
	// NOTE(review): the copy goroutines are not joined before the buffers
	// are read; trailing output could be missed — confirm.
	return outWriter.String(), errWriter.String(), cmd.ExitCode(), cmd.err
}
// RunWithInput runs command on the remote host, writing the process
// stdout and stderr to the given writers and feeding the process stdin
// from the stdin reader.
// Warning: stdin (not stdout/stderr) is forwarded unbuffered here, which
// means reading only one byte from stdin will send a WinRM HTTP packet to
// the remote host. If stdin is a pipe, it might be better for
// performance reasons to buffer it.
func (client *Client) RunWithInput(command string, stdout io.Writer, stderr io.Writer, stdin io.Reader) (exitCode int, err error) {
	shell, err := client.CreateShell()
	if err != nil {
		return 0, err
	}
	defer shell.Close()
	var cmd *Command
	cmd, err = shell.Execute(command)
	if err != nil {
		return 0, err
	}
	go io.Copy(cmd.Stdin, stdin)
	go io.Copy(stdout, cmd.Stdout)
	go io.Copy(stderr, cmd.Stderr)
	cmd.Wait()
	return cmd.ExitCode(), cmd.err
}
Add ability to pass transport.
package winrm
import (
"bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"net/http"
"github.com/masterzen/winrm/soap"
)
// Client carries the connection settings and HTTP transport used to
// issue WinRM requests against a single remote endpoint.
type Client struct {
	Parameters
	username  string
	password  string
	useHTTPS  bool
	url       string
	http      HttpPost
	transport *http.Transport
}
// NewClient will create a new remote client on url, connecting with user and password
// and the default request parameters.
// This function doesn't connect (connection happens only when CreateShell is called)
func NewClient(endpoint *Endpoint, user, password string) (client *Client, err error) {
	params := DefaultParameters()
	client, err = NewClientWithParameters(endpoint, user, password, params)
	return
}
// NewClientWithParameters will create a new remote client on url, connecting with user and password
// This function doesn't connect (connection happens only when CreateShell is called)
func NewClientWithParameters(endpoint *Endpoint, user, password string, params *Parameters) (client *Client, err error) {
	transport, err := newTransport(endpoint)
	if err != nil {
		// Don't hand back a half-initialized client alongside the error;
		// the original ignored a failure to build the transport.
		return nil, err
	}
	client = &Client{
		Parameters: *params,
		username:   user,
		password:   password,
		url:        endpoint.url(),
		http:       Http_post,
		useHTTPS:   endpoint.HTTPS,
		transport:  transport,
	}
	return
}
// NewClientWithParametersAndTransport will create a new remote client on url,
// connecting with user and password, and using the caller-supplied HTTP
// transport instead of building one from the endpoint configuration.
// This function doesn't connect (connection happens only when CreateShell is called)
func NewClientWithParametersAndTransport(endpoint *Endpoint, user, password string, params *Parameters, transport *http.Transport) (client *Client, err error) {
	// gofmt fix: the original parameter list was missing a space after the comma.
	client = &Client{
		Parameters: *params,
		username:   user,
		password:   password,
		url:        endpoint.url(),
		http:       Http_post,
		useHTTPS:   endpoint.HTTPS,
		transport:  transport,
	}
	return
}
// newTransport will create a new HTTP Transport, with options specified within the endpoint configuration
func newTransport(endpoint *Endpoint) (*http.Transport, error) {
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			// Honors the endpoint's insecure flag; skips server cert verification.
			InsecureSkipVerify: endpoint.Insecure,
		},
	}
	// Install a custom root CA pool only when a CA bundle was supplied.
	if endpoint.CACert != nil && len(*endpoint.CACert) > 0 {
		certPool, err := readCACerts(endpoint.CACert)
		if err != nil {
			return nil, err
		}
		transport.TLSClientConfig.RootCAs = certPool
	}
	return transport, nil
}
// readCACerts parses the PEM-encoded certificates in certs into a pool
// suitable for use as TLS root CAs.
func readCACerts(certs *[]byte) (*x509.CertPool, error) {
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(*certs) {
		// Error strings are lowercase by Go convention.
		return nil, fmt.Errorf("unable to read certificates")
	}
	return certPool, nil
}
// CreateShell will create a WinRM Shell, which is the prealable for running
// commands.
func (client *Client) CreateShell() (shell *Shell, err error) {
	request := NewOpenShellRequest(client.url, &client.Parameters)
	defer request.Free()
	response, err := client.sendRequest(request)
	if err == nil {
		var shellId string
		// The shell handle comes back in the SOAP response; keep it with
		// the client so later requests can address this shell.
		if shellId, err = ParseOpenShellResponse(response); err == nil {
			shell = &Shell{client: client, ShellId: shellId}
		}
	}
	return
}

// sendRequest posts the SOAP message using the client's configured HTTP
// implementation and returns the raw response body.
func (client *Client) sendRequest(request *soap.SoapMessage) (response string, err error) {
	return client.http(client, request)
}
// Run runs command on the remote host, writing the process stdout and
// stderr to the given writers. Note with this method it isn't possible
// to inject stdin.
func (client *Client) Run(command string, stdout io.Writer, stderr io.Writer) (exitCode int, err error) {
	shell, err := client.CreateShell()
	if err != nil {
		return 0, err
	}
	var cmd *Command
	cmd, err = shell.Execute(command)
	if err != nil {
		// Close the shell on the error path too; the original leaked it.
		shell.Close()
		return 0, err
	}
	// NOTE(review): copy errors are discarded and the goroutines are not
	// joined before returning — confirm best-effort output is intended.
	go io.Copy(stdout, cmd.Stdout)
	go io.Copy(stderr, cmd.Stderr)
	cmd.Wait()
	shell.Close()
	return cmd.ExitCode(), cmd.err
}
// RunWithString runs command on the remote host, returning the process
// stdout and stderr as strings, and using the input stdin string as the
// process input.
func (client *Client) RunWithString(command string, stdin string) (stdout string, stderr string, exitCode int, err error) {
	shell, err := client.CreateShell()
	if err != nil {
		return "", "", 0, err
	}
	defer shell.Close()
	var cmd *Command
	cmd, err = shell.Execute(command)
	if err != nil {
		return "", "", 0, err
	}
	if len(stdin) > 0 {
		// NOTE(review): the Write error is ignored — confirm best-effort
		// stdin injection is intended.
		cmd.Stdin.Write([]byte(stdin))
	}
	var outWriter, errWriter bytes.Buffer
	go io.Copy(&outWriter, cmd.Stdout)
	go io.Copy(&errWriter, cmd.Stderr)
	cmd.Wait()
	// NOTE(review): the copy goroutines are not joined before the buffers
	// are read; trailing output could be missed — confirm.
	return outWriter.String(), errWriter.String(), cmd.ExitCode(), cmd.err
}
// RunWithInput will run command on the remote host, writing the process
// stdout and stderr to the given writers, and injecting the process stdin
// from the stdin reader.
// Warning: stdin (not stdout/stderr) is unbuffered, which means reading only
// one byte from stdin will send a WinRM HTTP packet to the remote host. If
// stdin is a pipe, it might be better for performance reasons to buffer it.
func (client *Client) RunWithInput(command string, stdout io.Writer, stderr io.Writer, stdin io.Reader) (exitCode int, err error) {
	shell, err := client.CreateShell()
	if err != nil {
		return 0, err
	}
	defer shell.Close()
	var cmd *Command
	cmd, err = shell.Execute(command)
	if err != nil {
		return 0, err
	}
	// Pump stdin in and stream both output channels out concurrently.
	go io.Copy(cmd.Stdin, stdin)
	go io.Copy(stdout, cmd.Stdout)
	go io.Copy(stderr, cmd.Stderr)
	cmd.Wait()
	return cmd.ExitCode(), cmd.err
}
|
// Copyright (c) 2013-2015 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"fmt"
"io"
)
// MaxAddrPerMsg is the maximum number of addresses that can be in a single
// bitcoin addr message (MsgAddr).
const MaxAddrPerMsg = 1000

// MsgAddr implements the Message interface and represents a bitcoin
// addr message. It is used to provide a list of known active peers on the
// network. An active peer is considered one that has transmitted a message
// within the last 3 hours. Nodes which have not transmitted in that time
// frame should be forgotten. Each message is limited to a maximum number of
// addresses, which is currently 1000. As a result, multiple messages must
// be used to relay the full list.
//
// Use the AddAddress function to build up the list of known addresses when
// sending an addr message to another peer.
type MsgAddr struct {
	AddrList []*NetAddress // known active peer addresses; at most MaxAddrPerMsg
}
// AddAddress adds a known active peer to the message. It fails once the
// list already holds MaxAddrPerMsg entries.
func (msg *MsgAddr) AddAddress(na *NetAddress) error {
	if len(msg.AddrList) >= MaxAddrPerMsg {
		return messageError("MsgAddr.AddAddress",
			fmt.Sprintf("too many addresses in message [max %v]", MaxAddrPerMsg))
	}
	msg.AddrList = append(msg.AddrList, na)
	return nil
}
// AddAddresses adds multiple known active peers to the message, stopping
// at the first address that cannot be added.
func (msg *MsgAddr) AddAddresses(netAddrs ...*NetAddress) error {
	for _, addr := range netAddrs {
		if err := msg.AddAddress(addr); err != nil {
			return err
		}
	}
	return nil
}
// ClearAddresses removes all addresses from the message, leaving an
// empty (non-nil) list behind.
func (msg *MsgAddr) ClearAddresses() {
	msg.AddrList = make([]*NetAddress, 0)
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgAddr) BtcDecode(r io.Reader, pver uint32) error {
	count, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// Limit to max addresses per message.
	if count > MaxAddrPerMsg {
		str := fmt.Sprintf("too many addresses for message "+
			"[count %v, max %v]", count, MaxAddrPerMsg)
		return messageError("MsgAddr.BtcDecode", str)
	}
	msg.AddrList = make([]*NetAddress, 0, count)
	for i := uint64(0); i < count; i++ {
		na := NetAddress{}
		err := readNetAddress(r, pver, &na, true)
		if err != nil {
			return err
		}
		// AddAddress cannot fail here: count was validated against
		// MaxAddrPerMsg above, so ignoring its error is safe.
		msg.AddAddress(&na)
	}
	return nil
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgAddr) BtcEncode(w io.Writer, pver uint32) error {
	// NOTE: the legacy "max 1 address for old protocol versions" guard
	// was removed. It compared pver against ProtocolVersion (the current
	// version) instead of the intended MultipleAddressVersion, which
	// rejected every multi-address message for peers with pver > 1 and
	// broke getaddr responses.
	count := len(msg.AddrList)
	if count > MaxAddrPerMsg {
		str := fmt.Sprintf("too many addresses for message "+
			"[count %v, max %v]", count, MaxAddrPerMsg)
		return messageError("MsgAddr.BtcEncode", str)
	}
	err := writeVarInt(w, pver, uint64(count))
	if err != nil {
		return err
	}
	for _, na := range msg.AddrList {
		err = writeNetAddress(w, pver, na, true)
		if err != nil {
			return err
		}
	}
	return nil
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgAddr) Command() string {
	return CmdAddr
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgAddr) MaxPayloadLength(pver uint32) uint32 {
	if pver < ProtocolVersion {
		// Older protocol versions carry at most a single address:
		// Num addresses (varInt) + a single net address.
		return MaxVarIntPayload + maxNetAddressPayload(pver)
	}
	// Num addresses (varInt) + max allowed addresses.
	return MaxVarIntPayload + (MaxAddrPerMsg * maxNetAddressPayload(pver))
}
// NewMsgAddr returns a new bitcoin addr message that conforms to the
// Message interface. See MsgAddr for details.
func NewMsgAddr() *MsgAddr {
	// Pre-size the list for the maximum number of addresses to avoid
	// repeated growth while the caller fills it in.
	msg := MsgAddr{AddrList: make([]*NetAddress, 0, MaxAddrPerMsg)}
	return &msg
}
Remove legacy Bitcoin addr encoding
The legacy MsgAddr encoding caused getaddr responses to fail
whenever the message protocol version was greater than 1. This
commit fixes that bug.
// Copyright (c) 2013-2015 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"fmt"
"io"
)
// MaxAddrPerMsg is the maximum number of addresses that can be in a single
// bitcoin addr message (MsgAddr).
const MaxAddrPerMsg = 1000

// MsgAddr implements the Message interface and represents a bitcoin
// addr message. It is used to provide a list of known active peers on the
// network. An active peer is considered one that has transmitted a message
// within the last 3 hours. Nodes which have not transmitted in that time
// frame should be forgotten. Each message is limited to a maximum number of
// addresses, which is currently 1000. As a result, multiple messages must
// be used to relay the full list.
//
// Use the AddAddress function to build up the list of known addresses when
// sending an addr message to another peer.
type MsgAddr struct {
	AddrList []*NetAddress // known active peer addresses; at most MaxAddrPerMsg
}
// AddAddress adds a known active peer to the message. It fails once the
// list already holds MaxAddrPerMsg entries.
func (msg *MsgAddr) AddAddress(na *NetAddress) error {
	if len(msg.AddrList) >= MaxAddrPerMsg {
		return messageError("MsgAddr.AddAddress",
			fmt.Sprintf("too many addresses in message [max %v]", MaxAddrPerMsg))
	}
	msg.AddrList = append(msg.AddrList, na)
	return nil
}
// AddAddresses adds multiple known active peers to the message, stopping
// at the first address that cannot be added.
func (msg *MsgAddr) AddAddresses(netAddrs ...*NetAddress) error {
	for _, addr := range netAddrs {
		if err := msg.AddAddress(addr); err != nil {
			return err
		}
	}
	return nil
}
// ClearAddresses removes all addresses from the message, leaving an
// empty (non-nil) list behind.
func (msg *MsgAddr) ClearAddresses() {
	msg.AddrList = make([]*NetAddress, 0)
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgAddr) BtcDecode(r io.Reader, pver uint32) error {
	count, err := readVarInt(r, pver)
	if err != nil {
		return err
	}
	// Limit to max addresses per message.
	if count > MaxAddrPerMsg {
		str := fmt.Sprintf("too many addresses for message "+
			"[count %v, max %v]", count, MaxAddrPerMsg)
		return messageError("MsgAddr.BtcDecode", str)
	}
	msg.AddrList = make([]*NetAddress, 0, count)
	for i := uint64(0); i < count; i++ {
		na := NetAddress{}
		err := readNetAddress(r, pver, &na, true)
		if err != nil {
			return err
		}
		// AddAddress cannot fail here: count was validated against
		// MaxAddrPerMsg above, so ignoring its error is safe.
		msg.AddAddress(&na)
	}
	return nil
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgAddr) BtcEncode(w io.Writer, pver uint32) error {
	count := len(msg.AddrList)
	if count > MaxAddrPerMsg {
		return messageError("MsgAddr.BtcEncode", fmt.Sprintf(
			"too many addresses for message "+
				"[count %v, max %v]", count, MaxAddrPerMsg))
	}
	if err := writeVarInt(w, pver, uint64(count)); err != nil {
		return err
	}
	for _, addr := range msg.AddrList {
		if err := writeNetAddress(w, pver, addr, true); err != nil {
			return err
		}
	}
	return nil
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgAddr) Command() string {
	return CmdAddr
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgAddr) MaxPayloadLength(pver uint32) uint32 {
	// Num addresses (varInt) + max allowed addresses.
	return MaxVarIntPayload + (MaxAddrPerMsg * maxNetAddressPayload(pver))
}
// NewMsgAddr returns a new bitcoin addr message that conforms to the
// Message interface. See MsgAddr for details.
func NewMsgAddr() *MsgAddr {
	// Pre-size the list for the maximum number of addresses to avoid
	// repeated growth while the caller fills it in.
	msg := MsgAddr{AddrList: make([]*NetAddress, 0, MaxAddrPerMsg)}
	return &msg
}
|
package native
import (
"github.com/ziutek/mymysql/mysql"
"log"
"math"
"strconv"
)
// Result holds the state of one query result: column metadata, the
// row-decoding mode, and the status values reported by the server.
type Result struct {
	my          *Conn
	status_only bool // true if result doesn't contain result set
	binary      bool // Binary result expected
	field_count int
	fields      []*mysql.Field // Fields table
	fc_map      map[string]int // Maps field name to column number
	message     []byte
	affected_rows uint64
	// Primary key value (useful for AUTO_INCREMENT primary keys)
	insert_id uint64
	// Number of warnings during command execution.
	// You can use the SHOW WARNINGS query for details.
	warning_count int
	// MySQL server status immediately after the query execution
	status uint16
	// Set by GetRow if it returns nil row (end of rows reached)
	eor_returned bool
}
// StatusOnly returns true if this is a status result that includes no
// result set.
func (res *Result) StatusOnly() bool {
	return res.status_only
}

// Fields returns a table containing descriptions of the columns.
func (res *Result) Fields() []*mysql.Field {
	return res.fields
}

// Map returns the column index for the given name, or -1 if a field of
// that name doesn't exist.
func (res *Result) Map(field_name string) int {
	if fi, ok := res.fc_map[field_name]; ok {
		return fi
	}
	return -1
}

// Message returns the human-readable message from the server's OK packet.
func (res *Result) Message() string {
	return string(res.message)
}

// AffectedRows returns the number of rows affected by the command.
func (res *Result) AffectedRows() uint64 {
	return res.affected_rows
}

// InsertId returns the primary key value generated by the command
// (useful for AUTO_INCREMENT primary keys).
func (res *Result) InsertId() uint64 {
	return res.insert_id
}

// WarnCount returns the number of warnings produced during execution.
func (res *Result) WarnCount() int {
	return res.warning_count
}
// getResult reads packets from the server and dispatches on the first
// packet byte. With res == nil it expects an OK packet (returned as
// *Result) or a result set header (then loops for the field packets).
// With res != nil it consumes field packets until all are read, then
// returns either one row (mysql.Row) per call or the *Result itself when
// the terminating EOF packet arrives. Error packets (0xff) are turned
// into a panic by getErrorPacket; unknown packets panic with
// UNK_RESULT_PKT_ERROR.
func (my *Conn) getResult(res *Result) interface{} {
loop:
	pr := my.newPktReader() // New reader for next packet
	pkt0 := readByte(pr)
	if pkt0 == 255 {
		// Error packet — getErrorPacket panics, so control never returns.
		my.getErrorPacket(pr)
	}
	if res == nil {
		switch {
		case pkt0 == 0:
			// OK packet
			return my.getOkPacket(pr)
		case pkt0 > 0 && pkt0 < 251:
			// Result set header packet
			res = my.getResSetHeadPacket(pr)
			// Read next packet
			goto loop
		}
	} else {
		switch {
		case pkt0 == 254:
			// EOF packet
			res.warning_count, res.status = my.getEofPacket(pr)
			my.status = res.status
			return res
		case pkt0 > 0 && pkt0 < 251 && res.field_count < len(res.fields):
			// Field packet
			field := my.getFieldPacket(pr)
			res.fields[res.field_count] = field
			res.fc_map[field.Name] = res.field_count
			// Increment field count
			res.field_count++
			// Read next packet
			goto loop
		case pkt0 < 254 && res.field_count == len(res.fields):
			// Row Data Packet
			if res.binary {
				return my.getBinRowPacket(pr, res)
			} else {
				return my.getTextRowPacket(pr, res)
			}
		}
	}
	panic(UNK_RESULT_PKT_ERROR)
}
// getOkPacket parses an OK packet into a status-only Result. The first
// packet byte (0x00) was already consumed by getResult.
func (my *Conn) getOkPacket(pr *pktReader) (res *Result) {
	if my.Debug {
		log.Printf("[%2d ->] OK packet:", my.seq-1)
	}
	res = new(Result)
	res.status_only = true
	res.my = my
	// First byte was read by getResult
	res.affected_rows = readLCB(pr)
	res.insert_id = readLCB(pr)
	res.status = readU16(pr)
	my.status = res.status
	res.warning_count = int(readU16(pr))
	// The remainder of the packet is the human-readable message.
	res.message = pr.readAll()
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"AffectedRows=%d InsertId=0x%x Status=0x%x "+
			"WarningCount=%d Message=\"%s\"", res.affected_rows, res.insert_id,
			res.status, res.warning_count, res.message,
		)
	}
	return
}
// getErrorPacket parses an error packet and panics with the resulting
// *mysql.Error; the caller is expected to recover it higher up the stack.
// It never returns normally.
func (my *Conn) getErrorPacket(pr *pktReader) {
	if my.Debug {
		log.Printf("[%2d ->] Error packet:", my.seq-1)
	}
	var err mysql.Error
	err.Code = readU16(pr)
	// The SQL-state marker '#' must follow the error code.
	if readByte(pr) != '#' {
		panic(PKT_ERROR)
	}
	// Skip the 5-byte SQL state value.
	read(pr, 5)
	err.Msg = pr.readAll()
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"code=0x%x msg=\"%s\"", err.Code, err.Msg)
	}
	panic(&err)
}
// getEofPacket parses an EOF packet, returning the warning count and the
// server status flags it carries.
func (my *Conn) getEofPacket(pr *pktReader) (warn_count int, status uint16) {
	if my.Debug {
		log.Printf("[%2d ->] EOF packet:", my.seq-1)
	}
	warn_count = int(readU16(pr))
	status = readU16(pr)
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"WarningCount=%d Status=0x%x", warn_count, status)
	}
	return
}
// getResSetHeadPacket parses a result set header packet and allocates a
// Result sized for the announced number of columns. The first byte read
// by getResult is pushed back so readLCB sees the whole length-coded int.
func (my *Conn) getResSetHeadPacket(pr *pktReader) (res *Result) {
	if my.Debug {
		log.Printf("[%2d ->] Result set header packet:", my.seq-1)
	}
	pr.unreadByte()
	field_count := int(readLCB(pr))
	pr.checkEof()
	res = &Result{
		my:     my,
		fields: make([]*mysql.Field, field_count),
		fc_map: make(map[string]int),
	}
	if my.Debug {
		log.Printf(tab8s+"FieldCount=%d", field_count)
	}
	return
}
// getFieldPacket parses one column-definition (field) packet. The first
// byte read by getResult is pushed back so readStr sees the full string.
func (my *Conn) getFieldPacket(pr *pktReader) (field *mysql.Field) {
	if my.Debug {
		log.Printf("[%2d ->] Field packet:", my.seq-1)
	}
	pr.unreadByte()
	field = new(mysql.Field)
	field.Catalog = readStr(pr)
	field.Db = readStr(pr)
	field.Table = readStr(pr)
	field.OrgTable = readStr(pr)
	field.Name = readStr(pr)
	field.OrgName = readStr(pr)
	// Skip the fixed-length-fields marker byte and the charset (2 bytes).
	read(pr, 1+2)
	//field.Charset= readU16(pr)
	field.DispLen = readU32(pr)
	field.Type = readByte(pr)
	field.Flags = readU16(pr)
	field.Scale = readByte(pr)
	// Skip the trailing 2 filler bytes.
	read(pr, 2)
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"Name=\"%s\" Type=0x%x", field.Name, field.Type)
	}
	return
}
// getTextRowPacket decodes one text-protocol row: field_count
// length-coded values, each of which may be NULL.
func (my *Conn) getTextRowPacket(pr *pktReader, res *Result) mysql.Row {
	if my.Debug {
		log.Printf("[%2d ->] Text row data packet", my.seq-1)
	}
	pr.unreadByte()
	row := make(mysql.Row, res.field_count)
	for col := range row {
		if bin, null := readNullBin(pr); null {
			row[col] = nil
		} else {
			row[col] = bin
		}
	}
	pr.checkEof()
	return row
}
// getBinRowPacket decodes one binary-protocol row (prepared-statement
// result). The first packet byte was already consumed by getResult.
// Handles MYSQL_TYPE_YEAR, MYSQL_TYPE_NEWDATE and MYSQL_TYPE_GEOMETRY,
// which previously fell through to the panic in the default case.
func (my *Conn) getBinRowPacket(pr *pktReader, res *Result) mysql.Row {
	if my.Debug {
		log.Printf("[%2d ->] Binary row data packet", my.seq-1)
	}
	// First byte was read by getResult.
	// NULL bitmap: field_count bits, offset by 2 reserved bits.
	null_bitmap := make([]byte, (res.field_count+7+2)>>3)
	readFull(pr, null_bitmap)
	row := make(mysql.Row, res.field_count)
	for ii, field := range res.fields {
		null_byte := (ii + 2) >> 3
		null_mask := byte(1) << uint(2+ii-(null_byte<<3))
		if null_bitmap[null_byte]&null_mask != 0 {
			// Null field
			row[ii] = nil
			continue
		}
		typ := field.Type
		unsigned := (field.Flags & _FLAG_UNSIGNED) != 0
		switch typ {
		case MYSQL_TYPE_TINY:
			if unsigned {
				row[ii] = readByte(pr)
			} else {
				row[ii] = int8(readByte(pr))
			}
		case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
			if unsigned {
				row[ii] = readU16(pr)
			} else {
				row[ii] = int16(readU16(pr))
			}
		case MYSQL_TYPE_LONG, MYSQL_TYPE_INT24:
			if unsigned {
				row[ii] = readU32(pr)
			} else {
				row[ii] = int32(readU32(pr))
			}
		case MYSQL_TYPE_LONGLONG:
			if unsigned {
				row[ii] = readU64(pr)
			} else {
				row[ii] = int64(readU64(pr))
			}
		case MYSQL_TYPE_FLOAT:
			row[ii] = math.Float32frombits(readU32(pr))
		case MYSQL_TYPE_DOUBLE:
			row[ii] = math.Float64frombits(readU64(pr))
		case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL:
			// Decimals arrive as strings; parse into float64.
			dec := string(readBin(pr))
			var err error
			row[ii], err = strconv.ParseFloat(dec, 64)
			if err != nil {
				panic("MySQL server returned wrong decimal value: " + dec)
			}
		case MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VARCHAR,
			MYSQL_TYPE_BIT, MYSQL_TYPE_BLOB, MYSQL_TYPE_TINY_BLOB,
			MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_SET,
			MYSQL_TYPE_ENUM, MYSQL_TYPE_GEOMETRY:
			row[ii] = readBin(pr)
		case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE:
			row[ii] = readDate(pr)
		case MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP:
			row[ii] = readTime(pr)
		case MYSQL_TYPE_TIME:
			row[ii] = readDuration(pr)
		default:
			panic(UNK_MYSQL_TYPE_ERROR)
		}
	}
	return row
}
Handle the MySQL types noted in a forgotten TODO (YEAR, NEWDATE, GEOMETRY).
package native
import (
"github.com/ziutek/mymysql/mysql"
"log"
"math"
"strconv"
)
// Result holds the state of one query result: column metadata, the
// row-decoding mode, and the status values reported by the server.
type Result struct {
	my          *Conn
	status_only bool // true if result doesn't contain result set
	binary      bool // Binary result expected
	field_count int
	fields      []*mysql.Field // Fields table
	fc_map      map[string]int // Maps field name to column number
	message     []byte
	affected_rows uint64
	// Primary key value (useful for AUTO_INCREMENT primary keys)
	insert_id uint64
	// Number of warnings during command execution.
	// You can use the SHOW WARNINGS query for details.
	warning_count int
	// MySQL server status immediately after the query execution
	status uint16
	// Set by GetRow if it returns nil row (end of rows reached)
	eor_returned bool
}
// StatusOnly returns true if this is a status result that includes no
// result set.
func (res *Result) StatusOnly() bool {
	return res.status_only
}

// Fields returns a table containing descriptions of the columns.
func (res *Result) Fields() []*mysql.Field {
	return res.fields
}

// Map returns the column index for the given name, or -1 if a field of
// that name doesn't exist.
func (res *Result) Map(field_name string) int {
	if fi, ok := res.fc_map[field_name]; ok {
		return fi
	}
	return -1
}

// Message returns the human-readable message from the server's OK packet.
func (res *Result) Message() string {
	return string(res.message)
}

// AffectedRows returns the number of rows affected by the command.
func (res *Result) AffectedRows() uint64 {
	return res.affected_rows
}

// InsertId returns the primary key value generated by the command
// (useful for AUTO_INCREMENT primary keys).
func (res *Result) InsertId() uint64 {
	return res.insert_id
}

// WarnCount returns the number of warnings produced during execution.
func (res *Result) WarnCount() int {
	return res.warning_count
}
// getResult reads packets from the server and dispatches on the first
// packet byte. With res == nil it expects an OK packet (returned as
// *Result) or a result set header (then loops for the field packets).
// With res != nil it consumes field packets until all are read, then
// returns either one row (mysql.Row) per call or the *Result itself when
// the terminating EOF packet arrives. Error packets (0xff) are turned
// into a panic by getErrorPacket; unknown packets panic with
// UNK_RESULT_PKT_ERROR.
func (my *Conn) getResult(res *Result) interface{} {
loop:
	pr := my.newPktReader() // New reader for next packet
	pkt0 := readByte(pr)
	if pkt0 == 255 {
		// Error packet — getErrorPacket panics, so control never returns.
		my.getErrorPacket(pr)
	}
	if res == nil {
		switch {
		case pkt0 == 0:
			// OK packet
			return my.getOkPacket(pr)
		case pkt0 > 0 && pkt0 < 251:
			// Result set header packet
			res = my.getResSetHeadPacket(pr)
			// Read next packet
			goto loop
		}
	} else {
		switch {
		case pkt0 == 254:
			// EOF packet
			res.warning_count, res.status = my.getEofPacket(pr)
			my.status = res.status
			return res
		case pkt0 > 0 && pkt0 < 251 && res.field_count < len(res.fields):
			// Field packet
			field := my.getFieldPacket(pr)
			res.fields[res.field_count] = field
			res.fc_map[field.Name] = res.field_count
			// Increment field count
			res.field_count++
			// Read next packet
			goto loop
		case pkt0 < 254 && res.field_count == len(res.fields):
			// Row Data Packet
			if res.binary {
				return my.getBinRowPacket(pr, res)
			} else {
				return my.getTextRowPacket(pr, res)
			}
		}
	}
	panic(UNK_RESULT_PKT_ERROR)
}
// getOkPacket parses an OK packet into a status-only Result. The first
// packet byte (0x00) was already consumed by getResult.
func (my *Conn) getOkPacket(pr *pktReader) (res *Result) {
	if my.Debug {
		log.Printf("[%2d ->] OK packet:", my.seq-1)
	}
	res = new(Result)
	res.status_only = true
	res.my = my
	// First byte was read by getResult
	res.affected_rows = readLCB(pr)
	res.insert_id = readLCB(pr)
	res.status = readU16(pr)
	my.status = res.status
	res.warning_count = int(readU16(pr))
	// The remainder of the packet is the human-readable message.
	res.message = pr.readAll()
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"AffectedRows=%d InsertId=0x%x Status=0x%x "+
			"WarningCount=%d Message=\"%s\"", res.affected_rows, res.insert_id,
			res.status, res.warning_count, res.message,
		)
	}
	return
}
// getErrorPacket parses an error packet and panics with the resulting
// *mysql.Error; the caller is expected to recover it higher up the stack.
// It never returns normally.
func (my *Conn) getErrorPacket(pr *pktReader) {
	if my.Debug {
		log.Printf("[%2d ->] Error packet:", my.seq-1)
	}
	var err mysql.Error
	err.Code = readU16(pr)
	// The SQL-state marker '#' must follow the error code.
	if readByte(pr) != '#' {
		panic(PKT_ERROR)
	}
	// Skip the 5-byte SQL state value.
	read(pr, 5)
	err.Msg = pr.readAll()
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"code=0x%x msg=\"%s\"", err.Code, err.Msg)
	}
	panic(&err)
}
// getEofPacket parses an EOF packet, returning the warning count and the
// server status flags it carries.
func (my *Conn) getEofPacket(pr *pktReader) (warn_count int, status uint16) {
	if my.Debug {
		log.Printf("[%2d ->] EOF packet:", my.seq-1)
	}
	warn_count = int(readU16(pr))
	status = readU16(pr)
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"WarningCount=%d Status=0x%x", warn_count, status)
	}
	return
}
// getResSetHeadPacket parses a result set header packet and allocates a
// Result sized for the announced number of columns. The first byte read
// by getResult is pushed back so readLCB sees the whole length-coded int.
func (my *Conn) getResSetHeadPacket(pr *pktReader) (res *Result) {
	if my.Debug {
		log.Printf("[%2d ->] Result set header packet:", my.seq-1)
	}
	pr.unreadByte()
	field_count := int(readLCB(pr))
	pr.checkEof()
	res = &Result{
		my:     my,
		fields: make([]*mysql.Field, field_count),
		fc_map: make(map[string]int),
	}
	if my.Debug {
		log.Printf(tab8s+"FieldCount=%d", field_count)
	}
	return
}
// getFieldPacket parses one column-definition (field) packet. The first
// byte read by getResult is pushed back so readStr sees the full string.
func (my *Conn) getFieldPacket(pr *pktReader) (field *mysql.Field) {
	if my.Debug {
		log.Printf("[%2d ->] Field packet:", my.seq-1)
	}
	pr.unreadByte()
	field = new(mysql.Field)
	field.Catalog = readStr(pr)
	field.Db = readStr(pr)
	field.Table = readStr(pr)
	field.OrgTable = readStr(pr)
	field.Name = readStr(pr)
	field.OrgName = readStr(pr)
	// Skip the fixed-length-fields marker byte and the charset (2 bytes).
	read(pr, 1+2)
	//field.Charset= readU16(pr)
	field.DispLen = readU32(pr)
	field.Type = readByte(pr)
	field.Flags = readU16(pr)
	field.Scale = readByte(pr)
	// Skip the trailing 2 filler bytes.
	read(pr, 2)
	pr.checkEof()
	if my.Debug {
		log.Printf(tab8s+"Name=\"%s\" Type=0x%x", field.Name, field.Type)
	}
	return
}
// getTextRowPacket decodes one text-protocol row: field_count
// length-coded values, each of which may be NULL.
func (my *Conn) getTextRowPacket(pr *pktReader, res *Result) mysql.Row {
	if my.Debug {
		log.Printf("[%2d ->] Text row data packet", my.seq-1)
	}
	pr.unreadByte()
	row := make(mysql.Row, res.field_count)
	for col := range row {
		if bin, null := readNullBin(pr); null {
			row[col] = nil
		} else {
			row[col] = bin
		}
	}
	pr.checkEof()
	return row
}
// getBinRowPacket decodes one binary-protocol row (prepared-statement
// result), converting each column to the Go type matching its MySQL type.
// The first packet byte was already consumed by getResult.
func (my *Conn) getBinRowPacket(pr *pktReader, res *Result) mysql.Row {
	if my.Debug {
		log.Printf("[%2d ->] Binary row data packet", my.seq-1)
	}
	// First byte was read by getResult.
	// NULL bitmap: field_count bits, offset by 2 reserved bits.
	null_bitmap := make([]byte, (res.field_count+7+2)>>3)
	readFull(pr, null_bitmap)
	row := make(mysql.Row, res.field_count)
	for ii, field := range res.fields {
		// Locate this column's bit in the NULL bitmap (2-bit offset).
		null_byte := (ii + 2) >> 3
		null_mask := byte(1) << uint(2+ii-(null_byte<<3))
		if null_bitmap[null_byte]&null_mask != 0 {
			// Null field
			row[ii] = nil
			continue
		}
		typ := field.Type
		unsigned := (field.Flags & _FLAG_UNSIGNED) != 0
		switch typ {
		case MYSQL_TYPE_TINY:
			if unsigned {
				row[ii] = readByte(pr)
			} else {
				row[ii] = int8(readByte(pr))
			}
		case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
			if unsigned {
				row[ii] = readU16(pr)
			} else {
				row[ii] = int16(readU16(pr))
			}
		case MYSQL_TYPE_LONG, MYSQL_TYPE_INT24:
			if unsigned {
				row[ii] = readU32(pr)
			} else {
				row[ii] = int32(readU32(pr))
			}
		case MYSQL_TYPE_LONGLONG:
			if unsigned {
				row[ii] = readU64(pr)
			} else {
				row[ii] = int64(readU64(pr))
			}
		case MYSQL_TYPE_FLOAT:
			row[ii] = math.Float32frombits(readU32(pr))
		case MYSQL_TYPE_DOUBLE:
			row[ii] = math.Float64frombits(readU64(pr))
		case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL:
			// Decimals arrive as strings; parse into float64.
			dec := string(readBin(pr))
			var err error
			row[ii], err = strconv.ParseFloat(dec, 64)
			if err != nil {
				panic("MySQL server returned wrong decimal value: " + dec)
			}
		case MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VARCHAR,
			MYSQL_TYPE_BIT, MYSQL_TYPE_BLOB, MYSQL_TYPE_TINY_BLOB,
			MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_SET,
			MYSQL_TYPE_ENUM, MYSQL_TYPE_GEOMETRY:
			row[ii] = readBin(pr)
		case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE:
			row[ii] = readDate(pr)
		case MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP:
			row[ii] = readTime(pr)
		case MYSQL_TYPE_TIME:
			row[ii] = readDuration(pr)
		default:
			panic(UNK_MYSQL_TYPE_ERROR)
		}
	}
	return row
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.